vel_pr_criteria.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//    _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Jordi Cotela
//

#ifndef KRATOS_VEL_PR_CRITERIA_H
#define KRATOS_VEL_PR_CRITERIA_H

/* Project includes */
#include "utilities/openmp_utils.h"
#include "includes/model_part.h"
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"

namespace Kratos
{

///@addtogroup IncompressibleFluidApplication
///@{

///@name Kratos Classes
///@{

/// Convergence criteria for fluid problems.
/**
 This class implements a convergence control based on nodal velocity and
 pressure values. The error is evaluated separately for each of them, and
 relative and absolute tolerances for both must be specified.
 */
template< class TSparseSpace, class TDenseSpace >
class VelPrCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION( VelPrCriteria );

    typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef OpenMPUtils::PartitionVector PartitionVector;

    typedef std::size_t KeyType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /**
     * @param VelRatioTolerance Relative tolerance for velocity error
     * @param VelAbsTolerance Absolute tolerance for velocity error
     * @param PrsRatioTolerance Relative tolerance for pressure error
     * @param PrsAbsTolerance Absolute tolerance for pressure error
     */
    VelPrCriteria(  TDataType VelRatioTolerance,
                    TDataType VelAbsTolerance,
                    TDataType PrsRatioTolerance,
                    TDataType PrsAbsTolerance)
        : ConvergenceCriteria< TSparseSpace, TDenseSpace >()
    {
        mVelRatioTolerance = VelRatioTolerance;
        mVelAbsTolerance = VelAbsTolerance;

        mPrRatioTolerance = PrsRatioTolerance;
        mPrAbsTolerance = PrsAbsTolerance;
    }

    /// Destructor.
    ~VelPrCriteria() override {}

    ///@}
    ///@name Operators
    ///@{

    /// Compute relative and absolute error.
    /**
     * @param rModelPart Reference to the ModelPart containing the fluid problem.
     * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
     * @param A System matrix (unused)
     * @param Dx Vector of results (variations on nodal variables)
     * @param b RHS vector (residual)
     * @return true if convergence is achieved, false otherwise
     */
    bool PostCriteria(  ModelPart& rModelPart,
                        DofsArrayType& rDofSet,
                        const TSystemMatrixType& A,
                        const TSystemVectorType& Dx,
                        const TSystemVectorType& b ) override
    {
        if (SparseSpaceType::Size(Dx) != 0) // if we are solving for something
        {
            // Initialize
            TDataType VelSolutionNorm = 0.0;
            TDataType PrSolutionNorm = 0.0;
            TDataType VelIncreaseNorm = 0.0;
            TDataType PrIncreaseNorm = 0.0;
            unsigned int VelDofNum(0), PrDofNum(0);

            // Set a partition for OpenMP
            int NumDofs = rDofSet.size();
            PartitionVector DofPartition;
            int NumThreads = OpenMPUtils::GetNumThreads();
            OpenMPUtils::DivideInPartitions(NumDofs,NumThreads,DofPartition);

            // Loop over Dofs
            #pragma omp parallel reduction(+:VelSolutionNorm,PrSolutionNorm,VelIncreaseNorm,PrIncreaseNorm,VelDofNum,PrDofNum)
            {
                int k = OpenMPUtils::ThisThread();
                typename DofsArrayType::iterator DofBegin = rDofSet.begin() + DofPartition[k];
                typename DofsArrayType::iterator DofEnd = rDofSet.begin() + DofPartition[k+1];

                std::size_t DofId;
                TDataType DofValue;
                TDataType DofIncr;

                for (typename DofsArrayType::iterator itDof = DofBegin; itDof != DofEnd; ++itDof)
                {
                    if (itDof->IsFree())
                    {
                        DofId = itDof->EquationId();
                        DofValue = itDof->GetSolutionStepValue(0);
                        DofIncr = Dx[DofId];

                        KeyType CurrVar = itDof->GetVariable().Key();
                        if ((CurrVar == VELOCITY_X) || (CurrVar == VELOCITY_Y) ||
                                (CurrVar == VELOCITY_Z))
                        {
                            VelSolutionNorm += DofValue * DofValue;
                            VelIncreaseNorm += DofIncr * DofIncr;
                            ++VelDofNum;
                        }
                        else
                        {
                            PrSolutionNorm += DofValue * DofValue;
                            PrIncreaseNorm += DofIncr * DofIncr;
                            ++PrDofNum;
                        }
                    }
                }
            }

            if(VelSolutionNorm == 0.0)
                VelSolutionNorm = 1.0;
            if(PrSolutionNorm == 0.0)
                PrSolutionNorm = 1.0;

            TDataType VelRatio = sqrt(VelIncreaseNorm/VelSolutionNorm);
            TDataType PrRatio = sqrt(PrIncreaseNorm/PrSolutionNorm);

            TDataType VelAbs = sqrt(VelIncreaseNorm)/ static_cast<TDataType>(VelDofNum);
            TDataType PrAbs = sqrt(PrIncreaseNorm)/ static_cast<TDataType>(PrDofNum);

            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0)
            {
                std::cout << "CONVERGENCE CHECK:" << std::endl;
                std::cout << " VELOC.: ratio = " << VelRatio << "; exp.ratio = " << mVelRatioTolerance
                          << " abs = " << VelAbs << " exp.abs = " << mVelAbsTolerance << std::endl;
                std::cout << " PRESS.: ratio = " << PrRatio << "; exp.ratio = " << mPrRatioTolerance
                          << " abs = " << PrAbs << " exp.abs = " << mPrAbsTolerance << std::endl;
            }

            if (    (VelRatio <= mVelRatioTolerance || VelAbs <= mVelAbsTolerance) &&
                    (PrRatio <= mPrRatioTolerance || PrAbs <= mPrAbsTolerance) )
            {
                if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0)
                {
                    std::cout << "*** CONVERGENCE IS ACHIEVED ***" << std::endl;
                }
                return true;
            }
            else
            {
                return false;
            }
        }
        else // in this case all the displacements are imposed!
        {
            return true;
        }
    }

    /// Initialize this class before using it
    /**
     * @param rModelPart Reference to the ModelPart containing the fluid problem. (unused)
     */
    void Initialize( ModelPart& rModelPart ) override
    {
        BaseType::mConvergenceCriteriaIsInitialized = true;
    }

    void InitializeSolutionStep(    ModelPart& rModelPart,
                                    DofsArrayType& rDofSet,
                                    const TSystemMatrixType& A,
                                    const TSystemVectorType& Dx,
                                    const TSystemVectorType& b ) override
    {}

    void FinalizeSolutionStep(  ModelPart& rModelPart,
                                DofsArrayType& rDofSet,
                                const TSystemMatrixType& A,
                                const TSystemVectorType& Dx,
                                const TSystemVectorType& b ) override
    {}

    ///@} // Operations

private:

    TDataType mVelRatioTolerance;
    TDataType mVelAbsTolerance;

    TDataType mPrRatioTolerance;
    TDataType mPrAbsTolerance;
};

///@} // Kratos classes

///@} // Application group

}

#endif /* KRATOS_VEL_PR_CRITERIA_H */
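For reference, the test performed by PostCriteria above reduces to one scalar check per field: a field converges when either its relative error sqrt(||Dx||^2/||x||^2) or its per-Dof absolute error sqrt(||Dx||^2)/n falls under the corresponding tolerance, and both fields must pass. The following is a minimal standalone sketch of that logic in plain C++ (the helper name IsFieldConverged is hypothetical and not part of Kratos):

#include <cmath>
#include <cstddef>

// Hypothetical helper mirroring the per-field test in VelPrCriteria::PostCriteria.
bool IsFieldConverged(double IncreaseNorm2,   // sum of squared Dof increments
                      double SolutionNorm2,   // sum of squared Dof values
                      std::size_t DofNum,     // number of free Dofs of the field
                      double RatioTol, double AbsTol)
{
    if (SolutionNorm2 == 0.0) SolutionNorm2 = 1.0;  // same zero-norm guard as above
    const double Ratio = std::sqrt(IncreaseNorm2 / SolutionNorm2);
    const double Abs = std::sqrt(IncreaseNorm2) / static_cast<double>(DofNum);
    return (Ratio <= RatioTol) || (Abs <= AbsTol);
}

// Overall convergence requires both fields:
//   IsFieldConverged(velocity...) && IsFieldConverged(pressure...)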
SLSVMLossC2.c
/* Hakan Bilen
 * August 5, 2015
 *
 * Implementation of soft-max latent SVM in
 * "Weakly Supervised Object Detection with Posterior Regularization" in
 * BMVC 2014.
 *
 * Warning : posterior regularization for symmetry and mutual exclusion are
 * not implemented in this file!
 */

#include <math.h>
#include <float.h>   /* FLT_MAX (was missing in the original include list) */
#include <limits.h>
#include <omp.h>
#include "mex.h"

/* This function may not exit gracefully on bad input! */
float myLogSumExp(const float * vec, int dim) ;
void computeProb(const float * in, int dim, float * out) ;

/* MEX entry point (the standard signature was dropped in the flattened source
 * and is restored here). */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
  /* Variable Declarations */
  double *w, f, *g, *y;
  int nVars, nImgs;
  float lambda, beta;
  float lpos2neg,lneg2pos;

  /* Get Input Pointers */
  w = mxGetPr(prhs[0]);
  y = mxGetPr(prhs[2]);
  lambda = mxGetScalar(prhs[3]);
  beta = mxGetScalar(prhs[4]);

  float np = 0;
  float nn = 0;
  f = 0;  /* objective accumulator: must start at zero before f += fs[i] below */

  nImgs = (int)mxGetNumberOfElements(prhs[2]);
  nVars = (int)mxGetNumberOfElements(prhs[0]);

  int ifield = 0, nfields;  /* ifield initialized: it is only used in error prints */
  mwIndex jstruct;
  mwSize NStructElems;
  mwSize ndim;

  if(!mxIsStruct(prhs[1]))
    mexErrMsgIdAndTxt( "MATLAB:SLSVMC2:inputNotStruct",
                       "Input must be a structure.");

  /* get input arguments */
  nfields = mxGetNumberOfFields(prhs[1]);
  NStructElems = mxGetNumberOfElements(prhs[1]);

  if (NStructElems!=nImgs)
    mexErrMsgIdAndTxt( "MATLAB:SLSVMC2:WrongNumImgs",
                       "Wrong number of images!");

  /* number of features (boxes) for each image */
  int * nBoxes = mxCalloc(nImgs,sizeof(int));
  int * cumNBoxes = mxCalloc(nImgs+1,sizeof(int));
  cumNBoxes[0] = 0;

  int i,b,d;
  for(i=1;i<=nImgs;i++) {
    const mxArray *tmp = mxGetFieldByNumber(prhs[1], i-1, 0);
    if(tmp == NULL) {
      mexPrintf("%s%d\t%s%d\n", "FIELD: ", ifield+1, "STRUCT INDEX :", 1);
      mexErrMsgIdAndTxt( "MATLAB:data:fieldEmpty",
                         "Above field is empty!");
    }
    nBoxes[i-1] = (int)mxGetDimensions(tmp)[1];
    if (mxGetDimensions(tmp)[0]!=nVars)
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:wrongDim","Wrong feature dimensionality!");
    cumNBoxes[i] = cumNBoxes[i-1] + nBoxes[i-1];
  }

  /*
    mexPrintf("X[0,end] %f\n",X[4096]);
    mexPrintf("nImgs %d nVars %d\n",nImgs,nVars);
    mexPrintf("X[2,0] %f\n",X[2*4097]);
    mexPrintf("X[2,end] %f\n",X[3*4097-1]);
  */

  /* Allocated Memory for Function Variables */
  /* plhs[0] = mxCreateDoubleScalar(0); */
  plhs[1] = mxCreateDoubleMatrix(nVars,1,mxREAL);
  g = mxGetPr(plhs[1]);

  float * fs = mxCalloc(nImgs,sizeof(float));
  float * gs = mxCalloc(nImgs*nVars,sizeof(float));

  /* get number of positives and negatives */
  for(i=0;i<nImgs;i++) {
    if(y[i]>0) {
      np++;
    }
    else if(y[i]<0) {
      nn++;
    }
  }

  if (nn==0 || np==0)
    mexErrMsgIdAndTxt( "MATLAB:data:wlabel",
                       "No pos or neg label!");

  /* balanced loss for pos and neg */
  lpos2neg = 0.5 * (np+nn) / np;
  lneg2pos = 0.5 * (np+nn) / nn;

  float ** convProbs = (float **)mxCalloc(nImgs,sizeof(float*));
  float ** concProbs = (float **)mxCalloc(nImgs,sizeof(float*));
  float ** scores    = (float **)mxCalloc(nImgs,sizeof(float*));
  float ** augScores = (float **)mxCalloc(nImgs,sizeof(float*));

  for(i=0;i<nImgs;i++) {
    convProbs[i] = (float *)mxCalloc(2*(int)nBoxes[i],sizeof(float));
    concProbs[i] = (float *)mxCalloc((int)nBoxes[i],sizeof(float));
    scores[i]    = (float *)mxCalloc((int)nBoxes[i],sizeof(float));
    augScores[i] = (float *)mxCalloc(2*(int)nBoxes[i],sizeof(float));
  }

#pragma omp parallel for schedule(dynamic) private(i)
  for(i=0;i<nImgs;i++) {
    if(y[i]==0)
      continue;

    const mxArray *tmp = mxGetFieldByNumber(prhs[1], i, 0);
    if(tmp == NULL) {
      mexPrintf("%s%d\t%s%d\n", "FIELD: ", ifield+1, "STRUCT INDEX :", 1);
      mexErrMsgIdAndTxt( "MATLAB:data:fieldEmpty",
                         "Above field is empty!");
    }
    const float * x = (float *)mxGetData(tmp);
    int nB = (int)nBoxes[i];

    if((int)mxGetDimensions(tmp)[1]!=nB)
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:empty","mxGetDimensions(tmp)[1]!=nB");
    if(nB==0)
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:zeroval","zero num bb");
    if (mxGetDimensions(tmp)[0]!=nVars)
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:wrongdim","wrong feat dim");

    /* mexPrintf("y[%d] = %f nB %d\n",i,y[i],nB); */
    float concScore = 0;
    int b, d;
    for(b=0;b<nB;b++) {
      for(d=0;d<nVars;d++) {
        scores[i][b] += (float)w[d] * x[nVars*b+d];
      }
      scores[i][b] *= beta;
    }

    /* concave part */
    if(y[i]>0) {
      concScore = myLogSumExp(scores[i],nB);
      computeProb(scores[i],nB,concProbs[i]);
    }
    else if(y[i]<0) {
      concScore = logf((float)nB);
      for(b=0;b<nB;b++) {
        concProbs[i][b] = 0;
      }
    }
    else {
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:wlabel","wrong label");
    }

    /* convex part */
    if(y[i]>0) {
      for(b=0;b<nB;b++) {
        augScores[i][b] = scores[i][b];
        augScores[i][b+nB] = beta * lpos2neg;
      }
    }
    else if(y[i]<0) {
      for(b=0;b<nB;b++) {
        augScores[i][b] = scores[i][b] + beta * lneg2pos;
        augScores[i][b+nB] = 0;
      }
    }
    else {
      mexErrMsgIdAndTxt("MATLAB:SLSVMC2:wlabel","wrong label");
    }

    computeProb(augScores[i],2*nB,convProbs[i]);

    for(b=0;b<nB;b++) {
      float difp = (convProbs[i][b]-concProbs[i][b]);
      for(d=0;d<nVars;d++) {
        gs[i*nVars + d] += x[nVars*b+d] * difp;
      }
    }

    float convScore = myLogSumExp(augScores[i],2*nB);
    fs[i] = convScore - concScore;
  }

  for(i=0;i<nImgs;i++) {
    mxFree(augScores[i]);
    mxFree(scores[i]);
    mxFree(convProbs[i]);
    mxFree(concProbs[i]);
  }
  mxFree(augScores);
  mxFree(scores);
  mxFree(convProbs);
  mxFree(concProbs);

  /* sum objval and grads over all images */
  for(i=0;i<nImgs;i++) {
    if(y[i]==0)
      continue;
    f += fs[i];
    for(d=0;d<nVars;d++) {
      g[d] += gs[i*nVars+d];
    }
  }

  for(d=0;d<nVars;d++) {
    g[d] /= (nn+np);
  }
  f /= beta * (nn+np);

  /* add regularization (the last weight, the bias, is not regularized) */
  for(d=0;d<nVars-1;d++) {
    f += 0.5 * lambda * w[d] * w[d] ;
  }
  for(d=0;d<nVars-1;d++) {
    g[d] += lambda * w[d] ;
  }

  mxFree(cumNBoxes);
  mxFree(nBoxes);
  mxFree(gs);
  mxFree(fs);

  /*
    mxFree(gconcProbs);
    mxFree(gconvProbs);
    mxFree(gscores);
    mxFree(gaugScores);
  */
  plhs[0] = mxCreateDoubleScalar(f);
}

/*---------------------------------------------------------------------------*/
float myLogSumExp(const float * vec, int dim)
{
  /* Subtract the maximum before exponentiating so expf() cannot overflow. */
  float maxScore = -FLT_MAX ;
  int i=0;
  for (i=0;i<dim;i++) {
    if(maxScore<vec[i])
      maxScore = vec[i];
  }
  float sumScore = 0.f;
  for (i=0;i<dim;i++) {
    sumScore += expf(vec[i]-maxScore);
  }
  return logf(sumScore)+maxScore;
}

/*---------------------------------------------------------------------------*/
void computeProb(const float * in, int dim, float * out)
{
  /* Soft-max with the same max-shift trick for numerical stability. */
  float maxScore = -FLT_MAX ;
  int i=0;
  for (i=0;i<dim;i++) {
    if(maxScore<in[i])
      maxScore = in[i];
  }
  float sumExp = 0.f;
  for (i=0;i<dim;i++) {
    sumExp += expf(in[i]-maxScore);
  }
  mxAssert(sumExp>0.f,"");
  const float rSumExp = 1.f / sumExp;
  for (i=0;i<dim;i++) {
    out[i] = expf(in[i]-maxScore) * rSumExp;
  }
}
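Both myLogSumExp() and computeProb() above subtract the maximum score before calling expf(); that shift is what keeps the soft-max finite when the beta-scaled scores get large. A minimal standalone C demonstration of why it matters (example values only, not part of the MEX file):

#include <math.h>
#include <stdio.h>

int main(void)
{
    const float v[2] = { 1000.f, 999.f };

    /* naive form: expf(1000) overflows to inf, so the result is inf */
    float naive = logf(expf(v[0]) + expf(v[1]));

    /* shifted form: log(sum_i exp(v_i - m)) + m with m = max_i v_i;
       exp() only ever sees values <= 0, so nothing overflows */
    float m = v[0] > v[1] ? v[0] : v[1];
    float shifted = logf(expf(v[0] - m) + expf(v[1] - m)) + m;

    printf("naive = %f, shifted = %f\n", naive, shifted); /* inf vs ~1000.31 */
    return 0;
}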
GB_unop__asinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fp32_fp32)
// op(A') function:  GB (_unop_tran__asinh_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = asinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = asinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = asinhf (z) ;          \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__asinh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__asinh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
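The generated kernel above handles two storage cases: a full array, where asinhf() is applied to every entry, and a bitmap array, where Ab[p] marks which entries are present. A minimal standalone sketch of the bitmap pattern (plain C with made-up values, not GraphBLAS code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    float  Ax[4] = { 0.0f, 0.5f, 1.0f, 2.0f };
    int8_t Ab[4] = { 1, 0, 1, 1 };   /* entry 1 is absent */
    float  Cx[4] = { 0 };

    for (int64_t p = 0 ; p < 4 ; p++)
    {
        if (!Ab [p]) continue ;      /* bitmap case: skip absent entries */
        Cx [p] = asinhf (Ax [p]) ;
    }
    for (int p = 0 ; p < 4 ; p++)
        printf ("%g ", Cx [p]) ;     /* prints: 0 0 0.881374 1.44363 */
    printf ("\n") ;
    return 0;
}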
statistic.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%        SSSSS  TTTTT   AAA   TTTTT  IIIII  SSSSS  TTTTT  IIIII   CCCC        %
%        SS       T    A   A    T      I    SS       T      I    C            %
%         SSS     T    AAAAA    T      I     SSS     T      I    C            %
%           SS    T    A   A    T      I       SS    T      I    C            %
%        SSSSS    T    A   A    T    IIIII  SSSSS    T    IIIII   CCCC        %
%                                                                             %
%                                                                             %
%                    MagickCore Image Statistical Methods                     %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E v a l u a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EvaluateImage() applies a value to the image with an arithmetic, relational,
%  or logical operator to an image.  Use these operations to lighten or darken
%  an image, to increase or decrease contrast in an image, or to produce the
%  "negative" of an image.
%
%  The format of the EvaluateImage method is:
%
%      MagickBooleanType EvaluateImage(Image *image,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%      MagickBooleanType EvaluateImages(Image *images,
%        const MagickEvaluateOperator op,const double value,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o op: a channel op.
%
%    o value: the value to apply.
%
%    o exception: return any errors or warnings in this structure.
%
*/

typedef struct _PixelChannels
{
  double
    channel[MaxPixelChannels];
} PixelChannels;

static PixelChannels **DestroyPixelThreadSet(const Image *images,
  PixelChannels **pixels)
{
  register ssize_t
    i;

  size_t
    rows;

  assert(pixels != (PixelChannels **) NULL);
  rows=MagickMax(GetImageListLength(images),(size_t)
    GetMagickResourceLimit(ThreadResource));
  for (i=0; i < (ssize_t) rows; i++)
    if (pixels[i] != (PixelChannels *) NULL)
      pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
  pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
  const Image
    *next;

  PixelChannels
    **pixels;

  register ssize_t
    i;

  size_t
    columns,
    number_images,
    rows;

  number_images=GetImageListLength(images);
  rows=MagickMax(number_images,(size_t)
    GetMagickResourceLimit(ThreadResource));
  pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
  if (pixels == (PixelChannels **) NULL)
    return((PixelChannels **) NULL);
  (void) memset(pixels,0,rows*sizeof(*pixels));
  columns=MagickMax(number_images,MaxPixelChannels);
  for (next=images; next != (Image *) NULL; next=next->next)
    columns=MagickMax(next->columns,columns);
  for (i=0; i < (ssize_t) rows; i++)
  {
    register ssize_t
      j;

    pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,
      sizeof(**pixels));
    if (pixels[i] == (PixelChannels *) NULL)
      return(DestroyPixelThreadSet(images,pixels));
    for (j=0; j < (ssize_t) columns; j++)
    {
      register ssize_t
        k;

      for (k=0; k < MaxPixelChannels; k++)
        pixels[i][j].channel[k]=0.0;
    }
  }
  return(pixels);
}

static inline double EvaluateMax(const double x,const double y)
{
  if (x > y)
    return(x);
  return(y);
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static int IntensityCompare(const void *x,const void *y)
{
  const PixelChannels
    *color_1,
    *color_2;

  double
    distance;

  register ssize_t
    i;

  color_1=(const PixelChannels *) x;
  color_2=(const PixelChannels *) y;
  distance=0.0;
  for (i=0; i < MaxPixelChannels; i++)
    distance+=color_1->channel[i]-(double) color_2->channel[i];
  return(distance < 0 ? -1 : distance > 0 ? 1 : 0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  register ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result.  It differs from % or fmod() that returns a 'truncated
        modulus' result, where floor() is replaced by trunc() and could
        return a negative result (which is clipped).  For example, a sum of
        -1 maps to QuantumRange here rather than to -1.
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        GaussianNoise,value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        ImpulseNoise,value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        PoissonNoise,value);
      break;
    }
    case PowEvaluateOperator:
    {
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      result=((double) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        UniformNoise,value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}

static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  const Image
    *p,
    *q;

  size_t
    columns,
    rows;

  q=images;
  columns=images->columns;
  rows=images->rows;
  for (p=images; p != (Image *) NULL; p=p->next)
  {
    if (p->number_channels > q->number_channels)
      q=p;
    if (p->columns > columns)
      columns=p->columns;
    if (p->rows > rows)
      rows=p->rows;
  }
  return(CloneImage(q,columns,rows,MagickTrue,exception));
}

MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag  "Evaluate/Image"

  CacheView
    *evaluate_view,
    **image_view;

  const Image
    *next;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    j,
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(image);
    }
  next=images;
  for (j=0; j < (ssize_t) number_images; j++)
  {
    image_view[j]=AcquireVirtualCacheView(next,exception);
    next=GetNextImageInList(next);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,
                channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        register ssize_t
          i,
          x;

        register PixelChannels
          *evaluate_pixel;

        register Quantum
          *magick_restrict q;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              i;

            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,
                channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                register ssize_t
                  j;

                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory(p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (j=0; j < (ssize_t) number_images; j++)
    image_view[j]=DestroyCacheView(image_view[j]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     F u n c t i o n I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FunctionImage() applies a value to the image with an arithmetic, relational,
%  or logical operator to an image.  Use these operations to lighten or darken
%  an image, to increase or decrease contrast in an image, or to produce the
%  "negative" of an image.
%
%  The format of the FunctionImage method is:
%
%      MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const ssize_t number_parameters,
%        const double *parameters,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o function: a channel function.
%
%    o parameters: one or more parameters.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  register ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g.
        c0*x^3+c1*x^2+c2*x+c3).
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0/width*(QuantumScale*pixel-center);
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}

MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag  "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
        exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E n t r o p y                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageEntropy() returns the entropy of one or more image channels.
%
%  The format of the GetImageEntropy method is:
%
%      MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o entropy: the average entropy of the selected channels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=channel_statistics[CompositePixelChannel].entropy;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e E x t r e m a                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtrema() returns the extrema of one or more image channels.
%
%  The format of the GetImageExtrema method is:
%
%      MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
%        size_t *maxima,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o minima: the minimum value in the channel.
%
%    o maxima: the maximum value in the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  double
    max,
    min;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&min,&max,exception);
  *minima=(size_t) ceil(min-0.5);
  *maxima=(size_t) floor(max+0.5);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e K u r t o s i s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageKurtosis() returns the kurtosis and skewness of one or more image
%  channels.
%
%  The format of the GetImageKurtosis method is:
%
%      MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
%        double *skewness,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o kurtosis: the kurtosis of the channel.
%
%    o skewness: the skewness of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=channel_statistics[CompositePixelChannel].kurtosis;
  *skewness=channel_statistics[CompositePixelChannel].skewness;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M e a n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMean() returns the mean and standard deviation of one or more image
%  channels.
%
%  The format of the GetImageMean method is:
%
%      MagickBooleanType GetImageMean(const Image *image,double *mean,
%        double *standard_deviation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mean: the average value in the channel.
%
%    o standard_deviation: the standard deviation of the channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=channel_statistics[CompositePixelChannel].mean;
  *standard_deviation=
    channel_statistics[CompositePixelChannel].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M o m e n t s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMoments() returns the normalized moments of one or more image
%  channels.
%
%  The format of the GetImageMoments method is:
%
%      ChannelMoments *GetImageMoments(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static size_t GetImageChannels(const Image *image)
{
  register ssize_t
    i;

  size_t
    channels;

  channels=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channels++;
  }
  return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments  8

  CacheView
    *image_view;

  ChannelMoments
    *channel_moments;

  double
    M00[MaxPixelChannels+1],
    M01[MaxPixelChannels+1],
    M02[MaxPixelChannels+1],
    M03[MaxPixelChannels+1],
    M10[MaxPixelChannels+1],
    M11[MaxPixelChannels+1],
    M12[MaxPixelChannels+1],
    M20[MaxPixelChannels+1],
    M21[MaxPixelChannels+1],
    M22[MaxPixelChannels+1],
    M30[MaxPixelChannels+1];

  PointInfo
    centroid[MaxPixelChannels+1];

  ssize_t
    channel,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) memset(channel_moments,0,(MaxPixelChannels+1)*
    sizeof(*channel_moments));
  (void) memset(centroid,0,sizeof(centroid));
  (void) memset(M00,0,sizeof(M00));
  (void) memset(M01,0,sizeof(M01));
  (void) memset(M02,0,sizeof(M02));
  (void) memset(M03,0,sizeof(M03));
  (void) memset(M10,0,sizeof(M10));
  (void) memset(M11,0,sizeof(M11));
  (void) memset(M12,0,sizeof(M12));
  (void) memset(M20,0,sizeof(M20));
  (void) memset(M21,0,sizeof(M21));
  (void) memset(M22,0,sizeof(M22));
  (void) memset(M30,0,sizeof(M30));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M00[channel]+=QuantumScale*p[i];
        M00[MaxPixelChannels]+=QuantumScale*p[i];
        M10[channel]+=x*QuantumScale*p[i];
        M10[MaxPixelChannels]+=x*QuantumScale*p[i];
        M01[channel]+=y*QuantumScale*p[i];
        M01[MaxPixelChannels]+=y*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute center of mass (centroid).
    */
    centroid[channel].x=M10[channel]*PerceptibleReciprocal(M00[channel]);
    centroid[channel].y=M01[channel]*PerceptibleReciprocal(M00[channel]);
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-
          centroid[channel].y)*QuantumScale*p[i];
        M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          QuantumScale*p[i];
        M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-
          centroid[channel].x)*QuantumScale*p[i];
        M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          QuantumScale*p[i];
        M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-
          centroid[channel].y)*QuantumScale*p[i];
        M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-
          centroid[channel].x)*(y-centroid[channel].y)*QuantumScale*p[i];
        M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-
          centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
        M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-
          centroid[channel].x)*(y-centroid[channel].y)*(y-
          centroid[channel].y)*QuantumScale*p[i];
        M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
          (x-centroid[channel].x)*QuantumScale*p[i];
        M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-
          centroid[channel].x)*(x-centroid[channel].x)*QuantumScale*p[i];
        M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
          (y-centroid[channel].y)*QuantumScale*p[i];
        M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-
          centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
      }
      p+=GetPixelChannels(image);
    }
  }
  M00[MaxPixelChannels]/=GetImageChannels(image);
  M01[MaxPixelChannels]/=GetImageChannels(image);
  M02[MaxPixelChannels]/=GetImageChannels(image);
  M03[MaxPixelChannels]/=GetImageChannels(image);
  M10[MaxPixelChannels]/=GetImageChannels(image);
  M11[MaxPixelChannels]/=GetImageChannels(image);
  M12[MaxPixelChannels]/=GetImageChannels(image);
  M20[MaxPixelChannels]/=GetImageChannels(image);
  M21[MaxPixelChannels]/=GetImageChannels(image);
  M22[MaxPixelChannels]/=GetImageChannels(image);
  M30[MaxPixelChannels]/=GetImageChannels(image);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, &
      intensity.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])+
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0*
      PerceptibleReciprocal(M00[channel]))*((M20[channel]+M02[channel])-
      sqrt(4.0*M11[channel]*M11[channel]+(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
      M11[channel]*PerceptibleReciprocal(M20[channel]-M02[channel])));
    if (fabs(M11[channel]) < 0.0)
      {
        if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
      }
    else
      if (M11[channel] < 0.0)
        {
          if (fabs(M20[channel]-M02[channel]) >= 0.0)
            {
              if ((M20[channel]-M02[channel]) < 0.0)
                channel_moments[channel].ellipse_angle+=90.0;
              else
                channel_moments[channel].ellipse_angle+=180.0;
            }
        }
      else
        if ((fabs(M20[channel]-M02[channel]) >= 0.0) &&
            ((M20[channel]-M02[channel]) < 0.0))
          channel_moments[channel].ellipse_angle+=90.0;
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y*
      channel_moments[channel].ellipse_axis.y*PerceptibleReciprocal(
      channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.x)));
    channel_moments[channel].ellipse_intensity=M00[channel]*
      PerceptibleReciprocal(MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Normalize image moments.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+1.0)/2.0));
    M20[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+0.0)/2.0));
    M02[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+2.0)/2.0));
    M21[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+1.0)/2.0));
    M12[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(1.0+2.0)/2.0));
    M22[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(2.0+2.0)/2.0));
    M30[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(3.0+0.0)/2.0));
    M03[channel]*=PerceptibleReciprocal(pow(M00[channel],1.0+(0.0+3.0)/2.0));
    M00[channel]=1.0;
  }
  image_view=DestroyCacheView(image_view);
  for (channel=0; channel <= MaxPixelChannels; channel++)
  {
    /*
      Compute Hu invariant moments.
    */
    channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
    channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+
      M03[channel]);
    channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
      M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l P e r c e p t u a l H a s h                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePerceptualHash() returns the perceptual hash of one or more
%  image channels.
%
%  The format of the GetImagePerceptualHash method is:
%
%      ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image, ExceptionInfo *exception) { ChannelPerceptualHash *perceptual_hash; char *colorspaces, *q; const char *artifact; MagickBooleanType status; register char *p; register ssize_t i; perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory( MaxPixelChannels+1UL,sizeof(*perceptual_hash)); if (perceptual_hash == (ChannelPerceptualHash *) NULL) return((ChannelPerceptualHash *) NULL); artifact=GetImageArtifact(image,"phash:colorspaces"); if (artifact != NULL) colorspaces=AcquireString(artifact); else colorspaces=AcquireString("sRGB,HCLp"); perceptual_hash[0].number_colorspaces=0; perceptual_hash[0].number_channels=0; q=colorspaces; for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++) { ChannelMoments *moments; Image *hash_image; size_t j; ssize_t channel, colorspace; if (i >= MaximumNumberOfPerceptualColorspaces) break; colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p); if (colorspace < 0) break; perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace; hash_image=BlurImage(image,0.0,1.0,exception); if (hash_image == (Image *) NULL) break; hash_image->depth=8; status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace, exception); if (status == MagickFalse) break; moments=GetImageMoments(hash_image,exception); perceptual_hash[0].number_colorspaces++; perceptual_hash[0].number_channels+=GetImageChannels(hash_image); hash_image=DestroyImage(hash_image); if (moments == (ChannelMoments *) NULL) break; for (channel=0; channel <= MaxPixelChannels; channel++) for (j=0; j < MaximumNumberOfImageMoments; j++) perceptual_hash[channel].phash[i][j]= (-MagickLog10(moments[channel].invariant[j])); moments=(ChannelMoments *) RelinquishMagickMemory(moments); } colorspaces=DestroyString(colorspaces); return(perceptual_hash); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageRange() returns the range of one or more image channels. % % The format of the GetImageRange method is: % % MagickBooleanType GetImageRange(const Image *image,double *minima, % double *maxima,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima, double *maxima,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType initialize, status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; initialize=MagickTrue; *maxima=0.0; *minima=0.0; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status,initialize) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double row_maxima = 0.0, row_minima = 0.0; MagickBooleanType row_initialize; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } row_initialize=MagickTrue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (row_initialize != MagickFalse) { row_minima=(double) p[i]; row_maxima=(double) p[i]; row_initialize=MagickFalse; } else { if ((double) p[i] < row_minima) row_minima=(double) p[i]; if ((double) p[i] > row_maxima) row_maxima=(double) p[i]; } } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageRange) #endif { if (initialize != MagickFalse) { *minima=row_minima; *maxima=row_maxima; initialize=MagickFalse; } else { if (row_minima < *minima) *minima=row_minima; if (row_maxima > *maxima) *maxima=row_maxima; } } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageStatistics() returns statistics for each channel in the image. The % statistics include the channel depth, its minima, maxima, mean, standard % deviation, kurtosis and skewness. You can access the red channel mean, for % example, like this: % % channel_statistics=GetImageStatistics(image,exception); % red_mean=channel_statistics[RedPixelChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageStatistics method is: % % ChannelStatistics *GetImageStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ChannelStatistics *GetImageStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area, *histogram, standard_deviation; MagickStatusType status; QuantumAny range; register ssize_t i; size_t depth; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); channel_statistics=(ChannelStatistics *) AcquireQuantumMemory( MaxPixelChannels+1,sizeof(*channel_statistics)); if ((channel_statistics == (ChannelStatistics *) NULL) || (histogram == (double *) NULL)) { if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (channel_statistics != (ChannelStatistics *) NULL) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } (void) memset(channel_statistics,0,(MaxPixelChannels+1)* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-MagickMaximumValue); channel_statistics[i].minima=MagickMaximumValue; } (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; /* Compute pixel statistics. */ p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelReadMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[channel].depth; range=GetQuantumRange(depth); status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range), range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[channel].depth++; i--; continue; } } if ((double) p[i] < channel_statistics[channel].minima) channel_statistics[channel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[channel].maxima) channel_statistics[channel].maxima=(double) p[i]; channel_statistics[channel].sum+=p[i]; channel_statistics[channel].sum_squared+=(double) p[i]*p[i]; channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]* p[i]; channel_statistics[channel].area++; if ((double) p[i] < channel_statistics[CompositePixelChannel].minima) channel_statistics[CompositePixelChannel].minima=(double) p[i]; if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima) channel_statistics[CompositePixelChannel].maxima=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum((double) p[i]))+i]++; channel_statistics[CompositePixelChannel].sum+=(double) p[i]; channel_statistics[CompositePixelChannel].sum_squared+=(double) p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_cubed+=(double) p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*p[i]; channel_statistics[CompositePixelChannel].area++; } p+=GetPixelChannels(image); } } for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Normalize pixel statistics. */ area=PerceptibleReciprocal(channel_statistics[i].area); channel_statistics[i].sum*=area; channel_statistics[i].sum_squared*=area; channel_statistics[i].sum_cubed*=area; channel_statistics[i].sum_fourth_power*=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; standard_deviation=sqrt(channel_statistics[i].variance- (channel_statistics[i].mean*channel_statistics[i].mean)); standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area- 1.0)*channel_statistics[i].area*standard_deviation*standard_deviation); channel_statistics[i].standard_deviation=standard_deviation; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double number_bins; register ssize_t j; /* Compute pixel entropy. */ PixelChannel channel = GetPixelChannelChannel(image,i); number_bins=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) if (histogram[GetPixelChannels(image)*j+i] > 0.0) number_bins++; area=PerceptibleReciprocal(channel_statistics[channel].area); for (j=0; j <= (ssize_t) MaxMap; j++) { double count; count=area*histogram[GetPixelChannels(image)*j+i]; channel_statistics[channel].entropy+=-count*MagickLog10(count)* PerceptibleReciprocal(MagickLog10(number_bins)); channel_statistics[CompositePixelChannel].entropy+=-count* MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/ GetPixelChannels(image); } } histogram=(double *) RelinquishMagickMemory(histogram); for (i=0; i <= (ssize_t) MaxPixelChannels; i++) { /* Compute kurtosis & skewness statistics. 
*/ standard_deviation=PerceptibleReciprocal( channel_statistics[i].standard_deviation); channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0* channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0* channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0* channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)*(standard_deviation*standard_deviation* standard_deviation*standard_deviation)-3.0; } channel_statistics[CompositePixelChannel].mean=0.0; channel_statistics[CompositePixelChannel].standard_deviation=0.0; channel_statistics[CompositePixelChannel].entropy=0.0; for (i=0; i < (ssize_t) MaxPixelChannels; i++) { channel_statistics[CompositePixelChannel].mean+= channel_statistics[i].mean; channel_statistics[CompositePixelChannel].standard_deviation+= channel_statistics[i].standard_deviation; channel_statistics[CompositePixelChannel].entropy+= channel_statistics[i].entropy; } channel_statistics[CompositePixelChannel].mean/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].standard_deviation/=(double) GetImageChannels(image); channel_statistics[CompositePixelChannel].entropy/=(double) GetImageChannels(image); if (y < (ssize_t) image->rows) channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(channel_statistics); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l y n o m i a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolynomialImage() returns a new image where each pixel is the sum of the % pixels in the image sequence after applying its corresponding terms % (coefficient and degree pairs). % % The format of the PolynomialImage method is: % % Image *PolynomialImage(const Image *images,const size_t number_terms, % const double *terms,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o number_terms: the number of terms in the list. The actual list length % is 2 x number_terms + 1 (the constant). % % o terms: the list of polynomial coefficients and degree pairs and a % constant. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict polynomial_pixels; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register ssize_t i, x; register PixelChannels *polynomial_pixel; register Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } polynomial_pixel=polynomial_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) polynomial_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { register const Quantum *p; if (j >= (ssize_t) number_terms) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { MagickRealType coefficient, degree; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (polynomial_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; coefficient=(MagickRealType) terms[2*j]; degree=(MagickRealType) terms[(j << 1)+1]; polynomial_pixel[x].channel[i]+=coefficient* pow(QuantumScale*GetPixelChannel(image,channel,p),degree); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; 
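        /*
          At this point polynomial_pixel[x].channel[i] holds the normalized
          accumulation over the image list,

            sum_j terms[2j]*(QuantumScale*p_j)^terms[2j+1],

          so the store below rescales to quantum range and clamps.  A minimal
          scalar sketch of the same evaluation (hypothetical helper, not part
          of MagickCore):

            static double EvaluatePolynomialTerms(const double *terms,
              const size_t number_terms,const double x)
            {
              double sum = 0.0;  // x is a QuantumScale-normalized value
              for (size_t j=0; j < number_terms; j++)
                sum+=terms[2*j]*pow(x,terms[2*j+1]);
              return(sum);
            }
        */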
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,PolynomialImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t a t i s t i c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StatisticImage() makes each pixel the min / max / median / mode / etc. of % the neighborhood of the specified width and height. % % The format of the StatisticImage method is: % % Image *StatisticImage(const Image *image,const StatisticType type, % const size_t width,const size_t height,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the statistic type (median, mode, etc.). % % o width: the width of the pixel neighborhood. % % o height: the height of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ typedef struct _SkipNode { size_t next[9], count, signature; } SkipNode; typedef struct _SkipList { ssize_t level; SkipNode *nodes; } SkipList; typedef struct _PixelList { size_t length, seed; SkipList skip_list; size_t signature; } PixelList; static PixelList *DestroyPixelList(PixelList *pixel_list) { if (pixel_list == (PixelList *) NULL) return((PixelList *) NULL); if (pixel_list->skip_list.nodes != (SkipNode *) NULL) pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory( pixel_list->skip_list.nodes); pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list) { register ssize_t i; assert(pixel_list != (PixelList **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixel_list[i] != (PixelList *) NULL) pixel_list[i]=DestroyPixelList(pixel_list[i]); pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list); return(pixel_list); } static PixelList *AcquirePixelList(const size_t width,const size_t height) { PixelList *pixel_list; pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list)); if (pixel_list == (PixelList *) NULL) return(pixel_list); (void) memset((void *) pixel_list,0,sizeof(*pixel_list)); pixel_list->length=width*height; pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL, sizeof(*pixel_list->skip_list.nodes)); if (pixel_list->skip_list.nodes == (SkipNode *) NULL) return(DestroyPixelList(pixel_list)); (void) memset(pixel_list->skip_list.nodes,0,65537UL* sizeof(*pixel_list->skip_list.nodes)); pixel_list->signature=MagickCoreSignature; return(pixel_list); } static PixelList **AcquirePixelListThreadSet(const size_t width, const size_t height) { PixelList **pixel_list; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixel_list=(PixelList **) AcquireQuantumMemory(number_threads, sizeof(*pixel_list)); if (pixel_list == (PixelList **) NULL) return((PixelList **) NULL); (void) 
memset(pixel_list,0,number_threads*sizeof(*pixel_list)); for (i=0; i < (ssize_t) number_threads; i++) { pixel_list[i]=AcquirePixelList(width,height); if (pixel_list[i] == (PixelList *) NULL) return(DestroyPixelListThreadSet(pixel_list)); } return(pixel_list); } static void AddNodePixelList(PixelList *pixel_list,const size_t color) { register SkipList *p; register ssize_t level; size_t search, update[9]; /* Initialize the node. */ p=(&pixel_list->skip_list); p->nodes[color].signature=pixel_list->signature; p->nodes[color].count=1; /* Determine where it belongs in the list. */ search=65536UL; for (level=p->level; level >= 0; level--) { while (p->nodes[search].next[level] < color) search=p->nodes[search].next[level]; update[level]=search; } /* Generate a pseudo-random level for this node. */ for (level=0; ; level++) { pixel_list->seed=(pixel_list->seed*42893621L)+1L; if ((pixel_list->seed & 0x300) != 0x300) break; } if (level > 8) level=8; if (level > (p->level+2)) level=p->level+2; /* If we're raising the list's level, link back to the root node. */ while (level > p->level) { p->level++; update[p->level]=65536UL; } /* Link the node into the skip-list. */ do { p->nodes[color].next[level]=p->nodes[update[level]].next[level]; p->nodes[update[level]].next[level]=color; } while (level-- > 0); } static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color; ssize_t count; /* Find the median value for each of the color. */ p=(&pixel_list->skip_list); color=65536L; count=0; do { color=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, max_count, mode; ssize_t count; /* Make each pixel the 'predominant color' of the specified neighborhood. */ p=(&pixel_list->skip_list); color=65536L; mode=color; max_count=p->nodes[mode].count; count=0; do { color=p->nodes[color].next[0]; if (p->nodes[color].count > max_count) { mode=color; max_count=p->nodes[mode].count; } count+=p->nodes[color].count; } while (count < (ssize_t) pixel_list->length); *pixel=ScaleShortToQuantum((unsigned short) mode); } static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel) { register SkipList *p; size_t color, next, previous; ssize_t count; /* Finds the non peak value for each of the colors. */ p=(&pixel_list->skip_list); color=65536L; next=p->nodes[color].next[0]; count=0; do { previous=color; color=next; next=p->nodes[color].next[0]; count+=p->nodes[color].count; } while (count <= (ssize_t) (pixel_list->length >> 1)); if ((previous == 65536UL) && (next != 65536UL)) color=next; else if ((previous != 65536UL) && (next == 65536UL)) color=previous; *pixel=ScaleShortToQuantum((unsigned short) color); } static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list) { size_t signature; unsigned short index; index=ScaleQuantumToShort(pixel); signature=pixel_list->skip_list.nodes[index].signature; if (signature == pixel_list->signature) { pixel_list->skip_list.nodes[index].count++; return; } AddNodePixelList(pixel_list,index); } static void ResetPixelList(PixelList *pixel_list) { int level; register SkipNode *root; register SkipList *p; /* Reset the skip-list. 
*/ p=(&pixel_list->skip_list); root=p->nodes+65536UL; p->level=0; for (level=0; level < 9; level++) root->next[level]=65536UL; pixel_list->seed=pixel_list->signature++; } MagickExport Image *StatisticImage(const Image *image,const StatisticType type, const size_t width,const size_t height,ExceptionInfo *exception) { #define StatisticImageTag "Statistic/Image" CacheView *image_view, *statistic_view; Image *statistic_image; MagickBooleanType status; MagickOffsetType progress; PixelList **magick_restrict pixel_list; ssize_t center, y; /* Initialize statistics image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); statistic_image=CloneImage(image,0,0,MagickTrue, exception); if (statistic_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(statistic_image,DirectClass,exception); if (status == MagickFalse) { statistic_image=DestroyImage(statistic_image); return((Image *) NULL); } pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1)); if (pixel_list == (PixelList **) NULL) { statistic_image=DestroyImage(statistic_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Make each pixel the min / max / median / mode / etc. of the neighborhood. */ center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))* (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); statistic_view=AcquireAuthenticCacheView(statistic_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,statistic_image,statistic_image->rows,1) #endif for (y=0; y < (ssize_t) statistic_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y- (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1), MagickMax(height,1),exception); q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) statistic_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double area, maximum, minimum, sum, sum_squared; Quantum pixel; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image, channel); if ((traits == UndefinedPixelTrait) || (statistic_traits == UndefinedPixelTrait)) continue; if (((statistic_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(statistic_image,channel,p[center+i],q); continue; } if ((statistic_traits & UpdatePixelTrait) == 0) continue; pixels=p; area=0.0; minimum=pixels[i]; maximum=pixels[i]; sum=0.0; sum_squared=0.0; ResetPixelList(pixel_list[id]); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) 
MagickMax(width,1); u++) { if ((type == MedianStatistic) || (type == ModeStatistic) || (type == NonpeakStatistic)) { InsertPixelList(pixels[i],pixel_list[id]); pixels+=GetPixelChannels(image); continue; } area++; if (pixels[i] < minimum) minimum=(double) pixels[i]; if (pixels[i] > maximum) maximum=(double) pixels[i]; sum+=(double) pixels[i]; sum_squared+=(double) pixels[i]*pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } switch (type) { case GradientStatistic: { pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum)); break; } case MaximumStatistic: { pixel=ClampToQuantum(maximum); break; } case MeanStatistic: default: { pixel=ClampToQuantum(sum/area); break; } case MedianStatistic: { GetMedianPixelList(pixel_list[id],&pixel); break; } case MinimumStatistic: { pixel=ClampToQuantum(minimum); break; } case ModeStatistic: { GetModePixelList(pixel_list[id],&pixel); break; } case NonpeakStatistic: { GetNonpeakPixelList(pixel_list[id],&pixel); break; } case RootMeanSquareStatistic: { pixel=ClampToQuantum(sqrt(sum_squared/area)); break; } case StandardDeviationStatistic: { pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area))); break; } } SetPixelChannel(statistic_image,channel,pixel,q); } p+=GetPixelChannels(image); q+=GetPixelChannels(statistic_image); } if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } statistic_view=DestroyCacheView(statistic_view); image_view=DestroyCacheView(image_view); pixel_list=DestroyPixelListThreadSet(pixel_list); if (status == MagickFalse) statistic_image=DestroyImage(statistic_image); return(statistic_image); }
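/*
  Illustrative sketch (not part of MagickCore): GetImageStatistics() above
  accumulates raw power sums (sum, sum_squared, sum_cubed, sum_fourth_power)
  per channel and only afterwards normalizes them into mean, standard
  deviation, skewness, and kurtosis.  The same central-moment algebra on a
  plain array of doubles; names here are hypothetical:
*/
#include <math.h>
#include <stdio.h>

static void power_sum_statistics(const double *data,size_t n)
{
  double sum=0.0, sum2=0.0, sum3=0.0, sum4=0.0;

  for (size_t i=0; i < n; i++)
  {
    sum+=data[i];
    sum2+=data[i]*data[i];
    sum3+=data[i]*data[i]*data[i];
    sum4+=data[i]*data[i]*data[i]*data[i];
  }
  double area=1.0/(double) n;   /* normalize sums to expectations E[x^k] */
  sum*=area; sum2*=area; sum3*=area; sum4*=area;
  double mean=sum;
  double sigma=sqrt(sum2-mean*mean);  /* biased estimate; MagickCore then
                                         applies an n/(n-1) correction */
  if (sigma == 0.0)
    sigma=1.0;  /* constant input: avoid division by zero in the ratios */
  /* Same expansions as the "kurtosis & skewness" block above:
     E[(x-u)^3] = E[x^3]-3uE[x^2]+2u^3, E[(x-u)^4] = E[x^4]-4uE[x^3]+... */
  double skewness=(sum3-3.0*mean*sum2+2.0*mean*mean*mean)/
    (sigma*sigma*sigma);
  double kurtosis=(sum4-4.0*mean*sum3+6.0*mean*mean*sum2-
    3.0*mean*mean*mean*mean)/(sigma*sigma*sigma*sigma)-3.0;
  printf("mean=%g sigma=%g skewness=%g kurtosis=%g\n",
    mean,sigma,skewness,kurtosis);
}

int main(void)
{
  const double sample[]={ 0.0, 0.25, 0.5, 0.75, 1.0 };
  power_sum_statistics(sample,sizeof(sample)/sizeof(sample[0]));
  return 0;
}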
dotp.c
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REPEAT 100

int array_size = 10000000;

double *gen_array(int n) {
  double *array = malloc(sizeof(*array) * n);
  for (int i = 0; i < n; i++)
    array[i] = drand48();
  return array;
}

double dotp_naive(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < arr_size; i++)
#pragma omp critical
      global_sum += x[i] * y[i];
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 1
double dotp_manual_optimized(double *x, double *y, int arr_size) {
  // omp_get_max_threads() reflects the count set by omp_set_num_threads();
  // reading it here avoids the data race of assigning omp_get_num_threads()
  // from every thread inside a parallel region.
  int nt = omp_get_max_threads();
  double *sums = calloc(nt, sizeof(double));
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    double each_sum = 0.0;
    for (int i = tid; i < arr_size; i += nt)
      each_sum += x[i] * y[i];
    sums[tid] = each_sum;
  }
  double sum = 0;
  for (int i = 0; i < nt; i++)
    sum += sums[i];
  free(sums);
  return sum;
}

// EDIT THIS FUNCTION PART 2
double dotp_reduction_optimized(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel for reduction(+ : global_sum)
  for (int i = 0; i < arr_size; i++)
    global_sum += x[i] * y[i];
  return global_sum;
}

int main() {
  // Generate input vectors
  double *x = gen_array(array_size), *y = gen_array(array_size);
  double start_time, run_time;
  double serial_result, result;
  int ret = 0;

  // calculate result serially
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++) {
    serial_result = 0.0;
    for (int i = 0; i < array_size; i++)
      serial_result += x[i] * y[i];
  }
  run_time = omp_get_wtime() - start_time;
  printf("Serial solution took %f seconds\n", run_time);

  int num_threads = omp_get_max_threads();
  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_manual_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Manual Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("%g %g %g\n", serial_result, result, serial_result - result);
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_reduction_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Reduction Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  // Only run this once because it's too slow..
  omp_set_num_threads(1);
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++)
    result = dotp_naive(x, y, array_size);
  run_time = omp_get_wtime() - start_time;
  printf("Naive: %d thread(s) took %f seconds\n", 1, run_time);

exit:
  free(x);
  free(y);
  return ret;
}
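/*
  Optional variant (a sketch, not part of the assignment code above).  The
  manual version strides each thread through x and y with step nt, so every
  thread touches every cache line of both arrays, and the per-thread results
  land in adjacent doubles of sums[].  The version below gives each thread a
  contiguous chunk and pads each partial-sum slot to its own cache line;
  the 64-byte line size is an assumption.  It is a drop-in alternative to
  dotp_manual_optimized() for timing comparisons.
*/
#include <omp.h>
#include <stdlib.h>

typedef struct {
  double value;
  char pad[64 - sizeof(double)]; /* one slot per assumed 64-byte line */
} padded_sum;

double dotp_manual_padded(double *x, double *y, int arr_size) {
  int nt = omp_get_max_threads();
  padded_sum *sums = calloc(nt, sizeof(padded_sum));
#pragma omp parallel
  {
    int tid = omp_get_thread_num();
    double each_sum = 0.0;
    /* contiguous block per thread: better spatial locality than striding */
    int chunk = (arr_size + nt - 1) / nt;
    int lo = tid * chunk;
    int hi = lo + chunk > arr_size ? arr_size : lo + chunk;
    for (int i = lo; i < hi; i++)
      each_sum += x[i] * y[i];
    sums[tid].value = each_sum;
  }
  double sum = 0.0;
  for (int i = 0; i < nt; i++)
    sum += sums[i].value;
  free(sums);
  return sum;
}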
par_multi_interp.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.20 $ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- * hypre_ParAMGBuildMultipass * This routine implements Stuben's direct interpolation with multiple passes. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, double trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); double *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); double *A_offd_data = NULL; HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = NULL; HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; HYPRE_Int *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); HYPRE_Int *col_map_offd = NULL; HYPRE_Int num_cols_offd; hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; double *P_diag_data; HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_diag_j; hypre_CSRMatrix *P_offd; double *P_offd_data = NULL; HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_offd_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *send_map_start; HYPRE_Int *send_map_elmt; HYPRE_Int *send_procs; HYPRE_Int num_recvs = 0; HYPRE_Int *recv_vec_start; HYPRE_Int *recv_procs; HYPRE_Int *new_recv_vec_start = NULL; HYPRE_Int **Pext_send_map_start = NULL; HYPRE_Int **Pext_recv_vec_start = NULL; HYPRE_Int *Pext_start = NULL; HYPRE_Int *P_ncols = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int *P_marker; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *C_array; HYPRE_Int *C_array_offd = NULL; HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */ HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first point of pass j contained in pass_array */ HYPRE_Int *P_diag_start; HYPRE_Int *P_offd_start = NULL; HYPRE_Int **P_diag_pass; 
HYPRE_Int **P_offd_pass = NULL; HYPRE_Int **Pext_pass = NULL; HYPRE_Int **new_elmts = NULL; /* new neighbors generated in each pass */ HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for each pass */ HYPRE_Int *loc = NULL; /* contains locations for new neighbor connections in int_o_buffer to avoid searching */ HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero cols of off proc neighbors */ HYPRE_Int *Pext_send_buffer = NULL; /* used to collect global nonzero col ids in P_diag for send_map_elmts */ HYPRE_Int *map_S_to_new = NULL; /*HYPRE_Int *map_A_to_new = NULL;*/ HYPRE_Int *map_A_to_S = NULL; HYPRE_Int *new_col_map_offd = NULL; HYPRE_Int *col_map_offd_P = NULL; HYPRE_Int *permute = NULL; HYPRE_Int cnt; HYPRE_Int cnt_nz; HYPRE_Int total_nz; HYPRE_Int pass; HYPRE_Int num_passes; HYPRE_Int max_num_passes = 10; HYPRE_Int n_fine; HYPRE_Int n_coarse = 0; HYPRE_Int n_coarse_offd = 0; HYPRE_Int n_SF = 0; HYPRE_Int n_SF_offd = 0; HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *assigned = NULL; HYPRE_Int *assigned_offd = NULL; double *Pext_send_data = NULL; double *Pext_data = NULL; double sum_C, sum_N; double sum_C_pos, sum_C_neg; double sum_N_pos, sum_N_neg; double diagonal; double alfa = 1.0; double beta = 1.0; HYPRE_Int j_start; HYPRE_Int j_end; HYPRE_Int i,i1; HYPRE_Int j,j1; HYPRE_Int k,k1,k2,k3; HYPRE_Int pass_array_size; HYPRE_Int global_pass_array_size; HYPRE_Int local_pass_array_size; HYPRE_Int my_id, num_procs; HYPRE_Int index, start; HYPRE_Int my_first_cpt; HYPRE_Int total_global_cpts; HYPRE_Int p_cnt; HYPRE_Int total_nz_offd; HYPRE_Int cnt_nz_offd; HYPRE_Int cnt_offd, cnt_new; HYPRE_Int no_break; HYPRE_Int not_found; HYPRE_Int Pext_send_size; HYPRE_Int Pext_recv_size; HYPRE_Int old_Pext_send_size; HYPRE_Int old_Pext_recv_size; HYPRE_Int P_offd_size = 0; HYPRE_Int local_index = -1; HYPRE_Int new_num_cols_offd = 0; HYPRE_Int num_cols_offd_P; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop; HYPRE_Int pass_length; HYPRE_Int *tmp_marker, *tmp_marker_offd; HYPRE_Int *tmp_array, *tmp_array_offd; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1); HYPRE_Int * cnt_nz_per_thread; HYPRE_Int * cnt_nz_offd_per_thread; /* double wall_time; wall_time = hypre_MPI_Wtime(); */ /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]); for(i=0; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] = 0; cnt_nz_per_thread[i] = 0; } /*----------------------------------------------------------------------- * Access the CSR vectors for A and S. Also get size of fine grid. 
*-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; /* total_global_cpts = 0; */ if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } col_offd_S_to_A = NULL; } if (col_offd_S_to_A) { col_map_offd = col_map_offd_S; num_cols_offd = num_cols_offd_S; } else { col_map_offd = col_map_offd_A; num_cols_offd = num_cols_offd_A; } if (num_cols_offd_A) { A_offd_data = hypre_CSRMatrixData(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); } if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd); n_fine = hypre_CSRMatrixNumRows(A_diag); /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine); n_coarse = 0; n_SF = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == 1) n_coarse++; else if (CF_marker[i] == -3) n_SF++; pass_array_size = n_fine-n_coarse-n_SF; if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size); pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1); if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine); P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1); if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); if (send_map_start[num_sends]) int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends]); } index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = CF_marker[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (num_functions > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) int_buf_data[index++] = dof_func[send_map_elmt[j]]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } n_coarse_offd = 0; n_SF_offd = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd; i++) if (CF_marker_offd[i] == 1) n_coarse_offd++; else if (CF_marker_offd[i] == -3) n_SF_offd++; if 
(num_cols_offd)
   {
      assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      new_col_map_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd);
   }

   /*-----------------------------------------------------------------------
    * First Pass: determine the maximal size of P, and elementsPerRow[i].
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Assigned points are points for which we know an interpolation
    * formula already, and which are thus available to interpolate from.
    * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
    * on which pass their interpolation formula is determined.
    *
    * pass_array contains the points ordered according to their pass, i.e.
    * | C-points | points of pass 1 | points of pass 2 | ....
    * C_points are points 0 through pass_pointer[1]-1,
    * points of pass k (0 < k < num_passes) are contained in points
    * pass_pointer[k] through pass_pointer[k+1]-1 of pass_array.
    *
    * pass_array is also used to avoid going through all points for each pass,
    * i.e. at the beginning it contains all points in descending order starting
    * with n_fine-1. Then starting from the last point, we evaluate whether
    * it is a C_point (pass 0). If it is, the point is brought to the front
    * and the length of the points to be searched is shortened. This is
    * done until the parameter cnt (which determines the first point of
    * pass_array to be searched) becomes n_fine. Then all points have been
    * assigned a pass number.
    *-----------------------------------------------------------------------*/

   cnt = 0;
   p_cnt = pass_array_size-1;
   P_diag_i[0] = 0;
   P_offd_i[0] = 0;
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] == 1)
      {
         fine_to_coarse[i] = cnt; /* this C point is assigned index
                                     coarse_counter on coarse grid,
                                     and in column of P */
         C_array[cnt++] = i;
         assigned[i] = 0;
         P_diag_i[i+1] = 1; /* one element in row i1 of P */
         P_offd_i[i+1] = 0;
      }
      else if (CF_marker[i] == -1)
      {
         pass_array[p_cnt--] = i;
         P_diag_i[i+1] = 0;
         P_offd_i[i+1] = 0;
         assigned[i] = -1;
         fine_to_coarse[i] = -1;
      }
      else
      {
         P_diag_i[i+1] = 0;
         P_offd_i[i+1] = 0;
         assigned[i] = -1;
         fine_to_coarse[i] = -1;
      }
   }

   index = 0;
   for (i=0; i < num_sends; i++)
   {
      start = send_map_start[i];
      for (j = start; j < send_map_start[i+1]; j++)
      {
         int_buf_data[index] = fine_to_coarse[send_map_elmt[j]];
         if (int_buf_data[index] > -1)
            int_buf_data[index] += my_first_cpt;
         index++;
      }
   }
   if (num_procs > 1)
   {
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                fine_to_coarse_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   new_recv_vec_start = hypre_CTAlloc(HYPRE_Int,num_recvs+1);

   if (n_coarse_offd)
      C_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd);

   cnt = 0;
   new_recv_vec_start[0] = 0;
   for (j = 0; j < num_recvs; j++)
   {
      for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++)
      {
         if (CF_marker_offd[i] == 1)
         {
            map_S_to_new[i] = cnt;
            C_array_offd[cnt] = i;
            new_col_map_offd[cnt++] = fine_to_coarse_offd[i];
            assigned_offd[i] = 0;
         }
         else
         {
            assigned_offd[i] = -1;
            map_S_to_new[i] = -1;
         }
      }
      new_recv_vec_start[j+1] = cnt;
   }

   cnt = 0;
   hypre_TFree(fine_to_coarse_offd);

   if (col_offd_S_to_A)
   {
      map_A_to_S = hypre_CTAlloc(HYPRE_Int,num_cols_offd_A);
      for (i=0; i < num_cols_offd_A; i++)
      {
         if (cnt < num_cols_offd && col_map_offd_A[i] == col_map_offd[cnt])
            map_A_to_S[i] = cnt++;
         else
            map_A_to_S[i] = -1;
      }
   }
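   /*-----------------------------------------------------------------------
    * Illustrative serial sketch (not hypre code; "remaining" is a
    * hypothetical counter): how pass numbers propagate.  C points have
    * pass 0; an F point receives pass p as soon as one of its strong
    * neighbors (a column of S) was assigned pass p-1.  The loops below do
    * this walk in-place on pass_array and additionally consult
    * assigned_offd for strong neighbors owned by other processors.
    *-----------------------------------------------------------------------*/
#if 0
   for (pass = 1; remaining > 0 && pass < max_num_passes; pass++)
      for (i = 0; i < n_fine; i++)
         if (assigned[i] == -1)
            for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++)
               if (assigned[S_diag_j[j]] == pass-1)
               {
                  assigned[i] = pass;
                  remaining--;
                  break;
               }
#endif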
/*----------------------------------------------------------------------- * Mark all local neighbors of C points as 'assigned'. *-----------------------------------------------------------------------*/ pass_pointer[0] = 0; pass_pointer[1] = 0; total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */ total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */ cnt = 0; cnt_offd = 0; cnt_nz = 0; cnt_nz_offd = 0; for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_i[i1+1]++; cnt_nz++; assigned[i1] = 1; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_i[i1+1]++; cnt_nz_offd++; assigned[i1] = 1; } } if (assigned[i1] == 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; } } pass_pointer[2] = cnt; /*----------------------------------------------------------------------- * All local neighbors are assigned, now need to exchange the boundary * info for assigned strong neighbors. *-----------------------------------------------------------------------*/ index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*----------------------------------------------------------------------- * Now we need to determine strong neighbors of points of pass 1, etc. * we need to update assigned_offd after each pass *-----------------------------------------------------------------------*/ pass = 2; local_pass_array_size = pass_array_size - cnt; hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm); while (global_pass_array_size && pass < max_num_passes) { for (i = pass_array_size-1; i > cnt-1; i--) { i1 = pass_array[i]; no_break = 1; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; no_break = 0; break; } } if (no_break) { for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; break; } } } } /*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/ pass++; pass_pointer[pass] = cnt; local_pass_array_size = pass_array_size - cnt; hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm); index = 0; for (i=0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i+1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } hypre_TFree(int_buf_data); num_passes = pass; P_diag_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); /* P_diag_pass[i] will contain all column numbers for points of pass i */ P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz); P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine); /* P_diag_start[i] contains pointer to begin of column numbers in P_pass for point i, P_diag_i[i+1] contains number of columns for point i */ P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine); if 
(num_procs > 1) { P_offd_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); if (cnt_nz_offd) P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz_offd); else P_offd_pass[1] = NULL; new_elmts = hypre_CTAlloc(HYPRE_Int*,num_passes); new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1); new_counter[0] = 0; new_counter[1] = n_coarse_offd; new_num_cols_offd = n_coarse_offd; new_elmts[0] = new_col_map_offd; } /*----------------------------------------------------------------------- * Pass 1: now we consider points of pass 1, with strong C_neighbors, *-----------------------------------------------------------------------*/ cnt_nz = 0; cnt_nz_offd = 0; /* JBS: Possible candidate for threading */ for (i=pass_pointer[1]; i < pass_pointer[2]; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; } } } total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; if (num_procs > 1) { tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1); if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd); if (send_map_start[num_sends]) P_ncols = hypre_CTAlloc(HYPRE_Int,send_map_start[num_sends]); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_cols_offd+1; i++) { Pext_i[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < send_map_start[num_sends]; i++) { P_ncols[i] = 0; } } old_Pext_send_size = 0; old_Pext_recv_size = 0; for (pass=2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1); Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1); Pext_send_size = 0; Pext_send_map_start[pass][0] = 0; for (i=0; i < num_sends; i++) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE #endif for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1]; Pext_send_size += P_ncols[j]; } } Pext_send_map_start[pass][i+1] = Pext_send_size; } comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg, P_ncols, &Pext_i[1]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_buffer); Pext_send_buffer = hypre_CTAlloc(HYPRE_Int, Pext_send_size); } old_Pext_send_size = Pext_send_size; } cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_buffer[cnt_offd++] = my_first_cpt +P_diag_pass[pass-1][k]; } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; k3 = 0; while (k3 < pass-1) { if (k1 < new_counter[k3+1]) { k2 = k1-new_counter[k3]; Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2]; break; } k3++; } } } } } if (num_procs > 1) { 
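         /* Receive side of the previous pass's exchange: the P_ncols
            communication above filled Pext_i[j+1] with per-row counts, so
            each off-processor row j that was assigned in pass-1 gets an
            offset Pext_start[j] into the incoming buffer of global column
            ids (Pext_pass[pass]). */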
Pext_recv_size = 0; Pext_recv_vec_start[pass][0] = 0; cnt_offd = 0; for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { Pext_start[j] = cnt_offd; cnt_offd += Pext_i[j+1]; } } Pext_recv_size = cnt_offd; Pext_recv_vec_start[pass][i+1] = Pext_recv_size; } hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; if (Pext_recv_size) { Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size); new_elmts[pass-1] = hypre_CTAlloc(HYPRE_Int,Pext_recv_size); } else { Pext_pass[pass] = NULL; new_elmts[pass-1] = NULL; } comm_handle = hypre_ParCSRCommHandleCreate (11, tmp_comm_pkg, Pext_send_buffer, Pext_pass[pass]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(loc); loc = hypre_CTAlloc(HYPRE_Int,Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; } cnt_new = 0; cnt_offd = 0; /* JBS: Possible candidate for threading */ for (i=0; i < num_recvs; i++) { for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++) { if (assigned_offd[j] == pass-1) { for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++) { k1 = Pext_pass[pass][j1]; k2 = k1 - my_first_cpt; if (k2 > -1 && k2 < n_coarse) { Pext_pass[pass][j1] = -k2-1; } else { not_found = 1; k3 = 0; while (k3 < pass-1 && not_found) { k2 = hypre_BinarySearch(new_elmts[k3], k1, (new_counter[k3+1]-new_counter[k3])); if (k2 > -1) { Pext_pass[pass][j1] = k2 + new_counter[k3]; not_found = 0; } else { k3++; } } if (not_found) { new_elmts[pass-1][cnt_new] = Pext_pass[pass][j1]; loc[cnt_new++] = j1; } } } cnt_offd += Pext_i[j+1]; } } } if (cnt_new) { hypre_qsort2i(new_elmts[pass-1],loc,0,cnt_new-1); cnt = 0; local_index = new_counter[pass-1]; Pext_pass[pass][loc[0]] = local_index; for (i=1; i < cnt_new; i++) { if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt]) { new_elmts[pass-1][++cnt] = new_elmts[pass-1][i]; local_index++; } Pext_pass[pass][loc[i]] = local_index; } new_counter[pass] = local_index+1; } else if (num_procs > 1) new_counter[pass] = new_counter[pass-1]; if (new_num_cols_offd < local_index+1) { new_num_cols_offd = local_index+1; } pass_length = pass_pointer[pass+1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd) #endif { /* Thread by computing the sparsity structure for this pass only over * each thread's range of rows. Rows are divided up evenly amongst * the threads. The necessary thread-wise temporary arrays, like * P_marker, are initialized and de-allocated internally to the * parallel region. */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_length; } else { thread_stop = (pass_length/num_threads)*(my_thread_num+1); } thread_start += pass_pointer[pass]; thread_stop += pass_pointer[pass]; /* Local initializations */ cnt_nz = 0; cnt_nz_offd = 0; /* This block of code is to go to the top of the parallel region starting before * the loop over num_passes. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse); /* marks points to see if they're counted */ for (i=0; i < n_coarse; i++) { P_marker[i] = -1; } if (new_num_cols_offd == local_index+1) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); for (i=0; i < new_num_cols_offd; i++) { P_marker_offd[i] = -1; } } else if (n_coarse_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); for (i=0; i < n_coarse_offd; i++) { P_marker_offd[i] = -1; } } /* Need some variables to store each threads cnt_nz and cnt_nz_offd, and * then stitch things together as in par_interp.c * This loop writes * P_diag_i, P_offd_i: data parallel here, and require no special treatment * P_diag_start, P_offd_start: are not data parallel, require special treatment */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[k1] = i1; } } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } j_start = 0; for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != i1) { cnt_nz++; P_diag_i[i1+1]++; P_marker[-k1-1] = i1; } } else if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1+1]++; P_marker_offd[k1] = i1; } } } } } /* Update P_diag_start, P_offd_start with cumulative * nonzero counts over all threads */ if(my_thread_num == 0) { max_num_threads[0] = num_threads; } cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd; cnt_nz_per_thread[my_thread_num] = cnt_nz; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num == 0) { for(i = 1; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1]; cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if(my_thread_num > 0) { /* update this thread's section of P_diag_start and P_offd_start * with the num of nz's counted by previous threads */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1]; P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1]; } } else /* if my_thread_num == 0 */ { /* Grab the nz count for all threads */ cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1]; cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1]; /* Updated total nz count */ total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; /* Allocate P_diag_pass and P_offd_pass for all threads */ P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz); if (cnt_nz_offd) P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd); else if (num_procs > 1) P_offd_pass[pass] = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* offset cnt_nz and cnt_nz_offd to point to the starting * point in P_diag_pass and P_offd_pass for each thread */ if(my_thread_num > 0) { cnt_nz = cnt_nz_per_thread[my_thread_num-1]; cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1]; } else { cnt_nz = 0; cnt_nz_offd = 0; } /* Set P_diag_pass and 
P_offd_pass */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) { j_start = P_diag_start[j1]; j_end = j_start+P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_diag_pass[pass-1][k]; if (P_marker[k1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = k1; P_marker[k1] = -i1-1; } } j_start = P_offd_start[j1]; j_end = j_start+P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = P_offd_pass[pass-1][k]; if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1-1] != -i1-1) { P_diag_pass[pass][cnt_nz++] = -k1-1; P_marker[-k1-1] = -i1-1; } } else if (P_marker_offd[k1] != -i1-1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1-1; } } } } } hypre_TFree(P_marker); if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) ) { hypre_TFree(P_marker_offd); } } /* End parallel region */ } hypre_TFree(loc); hypre_TFree(P_ncols); hypre_TFree(Pext_send_buffer); hypre_TFree(new_recv_vec_start); hypre_TFree(cnt_nz_per_thread); hypre_TFree(cnt_nz_offd_per_thread); hypre_TFree(max_num_threads); P_diag_j = hypre_CTAlloc(HYPRE_Int,total_nz); P_diag_data = hypre_CTAlloc(double,total_nz); if (total_nz_offd) { P_offd_j = hypre_CTAlloc(HYPRE_Int,total_nz_offd); P_offd_data = hypre_CTAlloc(double,total_nz_offd); } for (i=0; i < n_fine; i++) { P_diag_i[i+1] += P_diag_i[i]; P_offd_i[i+1] += P_offd_i[i]; } /* determine P for coarse points */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_coarse; i++) { i1 = C_array[i]; P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1]; P_diag_data[P_diag_i[i1]] = 1.0; } if (weight_option) /*if this is set, weights are separated into negative and positive offdiagonals and accumulated accordingly */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int,n_fine); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. 
neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_pos = 0; sum_C_neg = 0; sum_N_pos = 0; sum_N_neg = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; P_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } if (j1 != -1 && P_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; if (A_diag_data[j] < 0) sum_C_neg += A_diag_data[j]; else sum_C_pos += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; P_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { if (A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } if (j1 != -1 && P_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; if (A_offd_data[j] < 0) sum_C_neg += A_offd_data[j]; else sum_C_pos += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < cnt_offd; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(P_marker); if (num_cols_offd) { hypre_TFree(P_marker_offd); } } /* End Parallel Region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; /*if (!col_offd_S_to_A) hypre_TFree(map_A_to_new);*/ if (n_coarse) hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_diag_pass[1]); if (num_procs > 1) hypre_TFree(P_offd_pass[1]); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data); Pext_send_data = hypre_CTAlloc(double, Pext_send_size); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data); Pext_data = hypre_CTAlloc(double, Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); 
hypre_TFree(Pext_send_map_start[pass]); hypre_TFree(Pext_recv_vec_start[pass]); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int,n_fine); for (i=0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); for (i=0; i < num_cols_offd; i++) P_marker_offd[i] = -1; } C_array = NULL; C_array_offd = NULL; if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse); } if (new_num_cols_offd > n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd); } else if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } /* Loop over each thread's row-range */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_neg = 0; sum_C_pos = 0; sum_N_neg = 0; sum_N_pos = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; C_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; C_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) P_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) P_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (P_marker[j1] == i1) { for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; P_diag_data[C_array[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) sum_N_neg += A_diag_data[j]; else sum_N_pos += A_diag_data[j]; } } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (j1 > -1 && P_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[C_array[-k1-1]] += alfa; else 
P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { if ( A_offd_data[j] < 0) sum_N_neg += A_offd_data[j]; else sum_N_pos += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal); if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) if (P_diag_data[j] < 0) P_diag_data[j] *= alfa; else P_diag_data[j] *= beta; for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++) if (P_offd_data[j] < 0) P_offd_data[j] *= alfa; else P_offd_data[j] *= beta; } hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_marker); if (num_cols_offd) { hypre_TFree(P_marker_offd); } } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass]); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass]); hypre_TFree(Pext_pass[pass]); } } /* End num_passes for-loop */ } else /* no distinction between positive and negative offdiagonal element */ { pass_length = pass_pointer[2]-pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); } for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); } /* determine P for points of pass 1, i.e. 
neighbors of coarse points */ for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; tmp_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; if (j1 != -1 && tmp_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; sum_C += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; tmp_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) sum_N += A_offd_data[j]; if (j1 != -1 && tmp_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; sum_C += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < cnt; j++) P_diag_data[j] *= alfa; for (j=P_offd_i[i1]; j < cnt_offd; j++) P_offd_data[j] *= alfa; } hypre_TFree(tmp_marker); hypre_TFree(tmp_marker_offd); } /* end OMP parallel region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) hypre_TFree(C_array); hypre_TFree(C_array_offd); hypre_TFree(P_diag_pass[1]); if (num_procs > 1) hypre_TFree(P_offd_pass[1]); for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data); Pext_send_data = hypre_CTAlloc(double, Pext_send_size); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i=0; i < num_sends; i++) { for (j=send_map_start[i]; j < send_map_start[i+1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass-1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1+1]; for (k=j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data); Pext_data = hypre_CTAlloc(double, Pext_recv_size); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass]); hypre_TFree(Pext_recv_vec_start[pass]); } pass_length = pass_pointer[pass+1]-pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. 
Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); } tmp_array = NULL; if (n_coarse) { tmp_array = hypre_CTAlloc(HYPRE_Int,n_coarse); } tmp_array_offd = NULL; if (new_num_cols_offd > n_coarse_offd) { tmp_array_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); } else { tmp_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd);} for (i=0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i=0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); } for (i=thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1]; cnt = P_diag_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; tmp_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j=j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; tmp_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass-1) tmp_marker[j1] = i1; } for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass-1) tmp_marker_offd[j1] = i1; } for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++) { j1 = A_diag_j[j]; if (tmp_marker[j1] == i1) { for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j]*P_diag_data[k]; P_diag_data[tmp_array[k1]] += alfa; sum_C += alfa; sum_N += alfa; } for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j]*P_offd_data[k]; P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) sum_N += A_diag_data[j]; } } for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++) { if (col_offd_S_to_A) j1 = map_A_to_S[A_offd_j[j]]; else j1 = A_offd_j[j]; if (j1 > -1 && tmp_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start+Pext_i[j1+1]; for (k=j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j]*Pext_data[k]; if (k1 < 0) P_diag_data[tmp_array[-k1-1]] += alfa; else P_offd_data[tmp_array_offd[k1]] += alfa; sum_C += alfa; sum_N += alfa; } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) sum_N += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal); for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++) P_diag_data[j] *= alfa; for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++) P_offd_data[j] *= alfa; } hypre_TFree(tmp_marker); hypre_TFree(tmp_marker_offd); hypre_TFree(tmp_array); hypre_TFree(tmp_array_offd); } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass]); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass]); 
hypre_TFree(Pext_pass[pass]);
         }
      }
   }

   hypre_TFree(CF_marker_offd);
   hypre_TFree(Pext_send_map_start);
   hypre_TFree(Pext_recv_vec_start);
   hypre_TFree(dof_func_offd);
   hypre_TFree(Pext_send_data);
   hypre_TFree(Pext_data);
   hypre_TFree(P_diag_pass);
   hypre_TFree(P_offd_pass);
   hypre_TFree(Pext_pass);
   hypre_TFree(P_diag_start);
   hypre_TFree(P_offd_start);
   hypre_TFree(Pext_start);
   hypre_TFree(Pext_i);
   hypre_TFree(fine_to_coarse);
   hypre_TFree(assigned);
   hypre_TFree(assigned_offd);
   hypre_TFree(pass_pointer);
   hypre_TFree(pass_array);
   hypre_TFree(map_S_to_new);
   hypre_TFree(map_A_to_S);

   if (num_procs > 1) hypre_TFree(tmp_comm_pkg);

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P by removing coefficients smaller than trunc_factor * Max,
      and/or by keeping at most <P_max_elmts> coefficients of largest
      absolute value per row */
   if (trunc_factor != 0.0 || P_max_elmts != 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
   }

   P_offd_size = P_offd_i[n_fine];
   num_cols_offd_P = 0;

   if (P_offd_size)
   {
      if (new_num_cols_offd > num_cols_offd)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd);
      }
      else
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < new_num_cols_offd; i++)
      {
         P_marker_offd[i] = 0;
      }

      num_cols_offd_P = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker_offd[index])
         {
            num_cols_offd_P++;
            P_marker_offd[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_offd_P);
      permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1]);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < new_counter[num_passes-1]; i++)
         permute[i] = -1;

      cnt = 0;
      for (i=0; i < num_passes-1; i++)
      {
         for (j=new_counter[i]; j < new_counter[i+1]; j++)
         {
            if (P_marker_offd[j])
            {
               col_map_offd_P[cnt] = new_elmts[i][j-new_counter[i]];
               permute[j] = col_map_offd_P[cnt++];
            }
         }
      }
      qsort0(col_map_offd_P,0,num_cols_offd_P-1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,k1) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < new_counter[num_passes-1]; i++)
      {
         k1 = permute[i];
         if (k1 != -1)
            permute[i] = hypre_BinarySearch(col_map_offd_P,k1,num_cols_offd_P);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
      {
         P_offd_j[i] = permute[P_offd_j[i]];
      }
      hypre_TFree(P_marker_offd);
   }

   if (num_procs > 1)
   {
      for (i=0; i < num_passes-1; i++)
         hypre_TFree(new_elmts[i]);
   }
   hypre_TFree(permute);
   hypre_TFree(new_elmts);
   hypre_TFree(new_counter);

   if (num_cols_offd_P)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;
   }

   if (n_SF)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1; } if (num_procs > 1) { hypre_MatvecCommPkgCreate(P); } *P_ptr = P; /* wall_time = hypre_MPI_Wtime() - wall_time; hypre_printf("TOTAL TIME %1.2e \n",wall_time); */ /*----------------------------------------------------------------------- * Build and return dof_func array for coarse grid. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Free mapping vector and marker array. *-----------------------------------------------------------------------*/ return(0); }
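/* A minimal, self-contained sketch of the count-then-fill idiom used in the
   interpolation routine above: each thread first counts its entries, the
   per-thread counts are prefix-summed so every thread knows its write offset,
   and only then is the shared output filled in parallel. This is an
   illustration only, not part of hypre; keep_larger, v, tol and out are
   made-up names. */
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* copies the entries of v larger than tol into *out; returns how many */
static int keep_larger(const double *v, int n, double tol, double **out)
{
   int total = 0;
   int max_threads = 1;
#ifdef _OPENMP
   max_threads = omp_get_max_threads();
#endif
   int *cnt = calloc(max_threads + 1, sizeof(int));
   double *res = NULL;
#ifdef _OPENMP
#pragma omp parallel
#endif
   {
      int tid = 0, nt = 1;
#ifdef _OPENMP
      tid = omp_get_thread_num();
      nt = omp_get_num_threads();
#endif
      int lo = (n/nt)*tid;
      int hi = (tid == nt-1) ? n : (n/nt)*(tid+1);
      int c = 0;
      for (int k = lo; k < hi; k++)       /* pass 1: count only */
         if (v[k] > tol) c++;
      cnt[tid+1] = c;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp single
#endif
      {
         for (int t = 0; t < nt; t++)     /* prefix sum over thread counts */
            cnt[t+1] += cnt[t];
         total = cnt[nt];
         res = malloc(total * sizeof(double));
      }                                    /* implicit barrier after single */
      int pos = cnt[tid];                  /* this thread's write offset */
      for (int k = lo; k < hi; k++)        /* pass 2: fill at the offset */
         if (v[k] > tol) res[pos++] = v[k];
   }
   free(cnt);
   *out = res;
   return total;
}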
GB_unop__log2_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__log2_fp64_fp64) // op(A') function: GB (_unop_tran__log2_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = log2 (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = log2 (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = log2 (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG2 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__log2_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = log2 (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = log2 (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__log2_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
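/* A minimal usage sketch (assumes a SuiteSparse:GraphBLAS installation; per
   the header above, applying the built-in GxB_LOG2_FP64 operator to an FP64
   matrix is what dispatches to GB (_unop_apply__log2_fp64_fp64); error
   checking omitted): */
#include <GraphBLAS.h>

int demo_log2_apply (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GrB_FP64, 2, 2) ;
    GrB_Matrix_new (&C, GrB_FP64, 2, 2) ;
    GrB_Matrix_setElement_FP64 (A, 8.0, 0, 0) ;
    GrB_Matrix_setElement_FP64 (A, 0.5, 1, 1) ;
    // C = log2 (A), applied entrywise to the pattern of A
    GrB_Matrix_apply (C, NULL, NULL, GxB_LOG2_FP64, A, NULL) ;
    double c00 ;
    GrB_Matrix_extractElement_FP64 (&c00, C, 0, 0) ;   // c00 == 3.0
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize () ;
    return 0 ;
}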
Example3.c
//#include <stdio.h>
//#include <omp.h>
//#include <conio.h>
//
//int main(int argc, char *argv[])
//{
//    int tid;
//#pragma omp parallel num_threads(4) shared(tid)
//    {
//#pragma omp single //The single construct is executed by exactly one thread
//        {
//            tid = omp_get_thread_num();
//            printf("The single construct was executed by thread %d \n", tid);
//        }
//        /* A barrier is automatically added here */
//        printf("This code is executed by thread %d.\n", omp_get_thread_num());
//    }
//    _getch(); // keeps the console window open (from <conio.h>)
//    return 0;
//}
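/* A portable variant of the commented-out example above (a sketch: it drops
   the Windows-only <conio.h>/_getch() pause, but keeps the same structure). */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    int tid;
#pragma omp parallel num_threads(4) shared(tid)
    {
#pragma omp single   /* executed by exactly one thread */
        {
            tid = omp_get_thread_num();
            printf("The single construct was executed by thread %d\n", tid);
        }
        /* implicit barrier at the end of the single construct */
        printf("This code is executed by thread %d.\n", omp_get_thread_num());
    }
    return 0;
}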
GB_binop__rdiv_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64) // A*D function (colscale): GB (_AxD__rdiv_fc64) // D*A function (rowscale): GB (_DxB__rdiv_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64) // C=scalar+B GB (_bind1st__rdiv_fc64) // C=scalar+B' GB (_bind1st_tran__rdiv_fc64) // C=A+scalar GB (_bind2nd__rdiv_fc64) // C=A'+scalar GB (_bind2nd_tran__rdiv_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_div (bij, aij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC64_div (y, x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK 
; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__rdiv_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_div (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_div (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_div (aij, x) ; \ } GrB_Info GB (_bind1st_tran__rdiv_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_div (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
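/* A minimal usage sketch (assumes SuiteSparse:GraphBLAS; per the header
   above, GxB_RDIV_FC64 computes z = y/x on double complex values, and an
   eWiseMult like the one below is the kind of call that reaches the
   _AemultB_* kernels in this file; GrB_init assumed done elsewhere, error
   checking omitted): */
#include <GraphBLAS.h>

void demo_rdiv_fc64 (GrB_Matrix A, GrB_Matrix B)
{
    GrB_Index nr, nc ;
    GrB_Matrix_nrows (&nr, A) ;
    GrB_Matrix_ncols (&nc, A) ;
    GrB_Matrix C ;
    GrB_Matrix_new (&C, GxB_FC64, nr, nc) ;
    // C = A .* B over the intersection pattern, with cij = bij / aij
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GxB_RDIV_FC64, A, B, NULL) ;
    GrB_Matrix_free (&C) ;
}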
rose_v1_break2.c
#include <omp.h>

int i;
int j;
int a[100][100];

void foo()
{
#pragma omp parallel for private (i)
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
      if (a[i][j] == 100)
        break;
    }
  }
}
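/* Note that the break above only exits the inner, sequential j loop; C's
   break cannot terminate the parallel i loop itself. A common workaround
   (a sketch, reusing the global array declared above) is a shared flag
   checked at the top of each iteration: */
void foo_with_early_exit(void)
{
  int done = 0;
#pragma omp parallel for
  for (int ii = 0; ii <= 99; ii += 1) {
    int stop;
#pragma omp atomic read
    stop = done;
    if (stop) continue;            /* remaining iterations become no-ops */
    for (int jj = 0; jj <= 99; jj += 1) {
      a[ii][jj] = a[ii][jj] + 1;
      if (a[ii][jj] == 100) {
#pragma omp atomic write
        done = 1;                  /* tell the other threads to wind down */
        break;                     /* still only leaves the inner loop */
      }
    }
  }
}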
ast-dump-openmp-master.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test() {
#pragma omp master
  ;
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-master.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPMasterDirective {{.*}} <line:4:1, col:19>
// CHECK-NEXT: `-NullStmt {{.*}} <line:5:3>
cosmem.h
#ifndef COS_COS_COSMEM_H #define COS_COS_COSMEM_H /** * C Object System * COS memory allocator * * Copyright 2006+ Laurent Deniau <laurent.deniau@gmail.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef COS_COS_COS_H #error "COS: use <cos/cos/cos.h> instead of <cos/cos/cosmem.h>" #endif /* NOTE-TODO: to clean for better use of COS framework... */ /* NOTE-USER: memory allocator front-end Purpose: provide fast memory allocator per-thread. allocated memory can be used-by/moved-to any thread (global allocator). Information: - parameters ending with an underscope can be null. - cos_mem_free does not free the memory, it just returns it to the pool. - cos_mem_xxx_n are ~50% faster, but objects must have the SAME cos_mem_size. - cos_mem_alloc_n returns the number of objects effectively allocated. - cos_mem_collect does free the unused memory of the thread specific pool. - cos_mem_collect and cos_mem_unused can be called for a specific cos_mem_size. Errors: - cos_mem_free_n on objects of different cos_mem_size is undefined. - cos_mem_free[_n] on objects not allocated by cos_mem_alloc[_n] is undefined. Compilation: - set cos_mem_AS_DEFAULT to 1 to activate it for the standard allocator. */ // --- interface -------------------------------------------------------------- // allocator static void* cos_mem_alloc (size_t size); static void* cos_mem_calloc (size_t count, size_t size); static void* cos_mem_realloc (void *ptr_, size_t new_size); static void cos_mem_free (void *ptr_); static size_t cos_mem_size (void *ptr_); // allocator, array versions static int cos_mem_alloc_n (void *ptr_[], int n, size_t size); static void cos_mem_free_n (void *ptr_[], int n); // tuning extern size_t cos_mem_unused (int *count_, size_t *only_); extern size_t cos_mem_collect (int *percent_, size_t *only_); // --- configuration ----------------------------------------------------------- // set COS_MEM_AS_DEFAULT to 1 to activate this front-end for the standard allocator. 
#ifndef COS_MEM_AS_DEFAULT #define COS_MEM_AS_DEFAULT 1 #endif // --- inline implementation --------------------------------------------------- #include <string.h> #include <assert.h> // memory node union cos_mem_node_ { struct { union cos_mem_node_ *next; } free; struct { unsigned slot; union { long n, *np; size_t s, *sp; double d, *dp; void (*fp)(void); } data[1]; } used; }; struct cos_mem_slot_ { union cos_mem_node_ *list; }; // pool size enum { cos_mem_node_size_ = offsetof(union cos_mem_node_, used.data), cos_mem_slot_size_ = sizeof(struct cos_mem_slot_), cos_mem_slot_max_ = (1<<16)/cos_mem_slot_size_, // max pool size: ~64kB/thread }; // memory pool struct cos_mem_pool_ { size_t size; struct cos_mem_slot_ slot[cos_mem_slot_max_]; }; // pool (per thread) extern struct cos_mem_pool_ cos_mem_pool_; // max unused memory (per thread), default ~32MB extern size_t cos_mem_pool_max_; #ifdef _OPENMP #pragma omp threadprivate (cos_mem_pool_) #endif #if !defined(__GNUC__) && !defined(__attribute__) #define __attribute__(a) #endif #ifdef __cplusplus # define restrict # define ATT __attribute__((always_inline)) inline #else # define ATT __attribute__((always_inline)) static inline #endif // -- utils ATT __attribute__((pure)) union cos_mem_node_* cos_mem_get_base_ (void *ptr) { return (union cos_mem_node_*)((char*)ptr - cos_mem_node_size_); } ATT __attribute__((pure)) unsigned cos_mem_get_slot_ (size_t size) { return (size + cos_mem_node_size_-1) / cos_mem_node_size_; } ATT __attribute__((pure)) void* cos_mem_node_init_ (union cos_mem_node_ *ptr, unsigned slot) { ptr->used.slot = slot; return ptr->used.data; } // -- allocator ATT __attribute__((malloc)) void* cos_mem_alloc (size_t size) { unsigned slot = cos_mem_get_slot_(size); struct cos_mem_pool_ *restrict pool = &cos_mem_pool_; struct cos_mem_slot_ *restrict pptr = pool->slot+slot; union cos_mem_node_ *ptr; if (slot < cos_mem_slot_max_ && pptr->list) { pool->size -= slot*cos_mem_node_size_; ptr = pptr->list, pptr->list = ptr->free.next; } else { if (pool->size > cos_mem_pool_max_) cos_mem_collect(0,0); ptr = (union cos_mem_node_*)malloc((slot+1) * cos_mem_node_size_); if (!ptr) return 0; } return cos_mem_node_init_(ptr, slot); } ATT __attribute__((malloc)) void* cos_mem_calloc (size_t count, size_t size) { size_t sz = count*size; assert(count < 2 || size < 2 || (sz > count && sz > size)); void *ptr = cos_mem_alloc(sz); if (ptr) memset(ptr, 0, sz); return ptr; } ATT __attribute__((malloc)) void* cos_mem_realloc (void* ptr_, size_t size) { unsigned slot = cos_mem_get_slot_(size); union cos_mem_node_ *ptr = ptr_ ? 
cos_mem_get_base_(ptr_) : (union cos_mem_node_*)ptr_; ptr = (union cos_mem_node_*)realloc(ptr, (slot+1) * cos_mem_node_size_); if (!ptr) return 0; ptr->used.slot = slot; return ptr->used.data; } ATT void cos_mem_free (void* ptr_) { if (!ptr_) return; union cos_mem_node_ *ptr = cos_mem_get_base_(ptr_); unsigned slot = ptr->used.slot; if (slot < cos_mem_slot_max_) { struct cos_mem_pool_ *restrict pool = &cos_mem_pool_; struct cos_mem_slot_ *restrict pptr = pool->slot+slot; pool->size += slot*cos_mem_node_size_; ptr->free.next = pptr->list, pptr->list = ptr; } else free(ptr); } // -- allocator, array versions ATT int cos_mem_alloc_n (void* ptr_[], int n, size_t size) { unsigned slot = cos_mem_get_slot_(size); union cos_mem_node_ *ptr; int i = 0; if (slot < cos_mem_slot_max_) { struct cos_mem_pool_ *restrict pool = &cos_mem_pool_; struct cos_mem_slot_ *restrict pptr = pool->slot+slot; for (; i < n && pptr->list; i++) { ptr = pptr->list, pptr->list = ptr->free.next; ptr_[i] = cos_mem_node_init_(ptr, slot); } pool->size -= i*slot*cos_mem_node_size_; if (i < n && pool->size > cos_mem_pool_max_) cos_mem_collect(0,0); for (; i < n; i++) { ptr = (union cos_mem_node_*)malloc((slot+1) * cos_mem_node_size_); if (ptr) ptr_[i] = cos_mem_node_init_(ptr, slot); else break; } } else { for (; i < n; i++) { ptr = (union cos_mem_node_*)malloc((slot+1) * cos_mem_node_size_); if (ptr) ptr_[i] = cos_mem_node_init_(ptr, slot); else break; } } return i; } ATT void cos_mem_free_n (void* ptr_[], int n) { if (n > 0) { unsigned slot = cos_mem_get_base_(ptr_[n-1])->used.slot; if (slot < cos_mem_slot_max_) { struct cos_mem_pool_ *restrict pool = &cos_mem_pool_; struct cos_mem_slot_ *restrict pptr = pool->slot+slot; int i = n; while (n--) { union cos_mem_node_ *ptr = cos_mem_get_base_(ptr_[n]); ptr->free.next = pptr->list, pptr->list = ptr; } pool->size += i*slot*cos_mem_node_size_; } else while (n--) free(cos_mem_get_base_(ptr_[n])); } } // -- tuning ATT __attribute__((pure)) size_t cos_mem_size (void* ptr_) { return ptr_ ? cos_mem_get_base_(ptr_)->used.slot * cos_mem_node_size_ : 0; } #undef ATT // --- global redefinition ---------------------------------------------------- #if COS_MEM_AS_DEFAULT #define malloc(size) cos_mem_alloc(size) #define calloc(count,size) cos_mem_calloc(count,size) #define realloc(ptr,size) cos_mem_realloc(ptr,size) #define free(ptr) cos_mem_free(ptr) #endif // ---------------------------------------------------------------------------- #endif // COS_COS_COSMEM_H
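/* A minimal usage sketch for the interface documented above (assumes this
   header was pulled in via <cos/cos/cos.h>; error checking omitted): */
static void cos_mem_demo(void)
{
  /* single allocation; cos_mem_free returns it to the per-thread pool */
  double *x = (double*)cos_mem_alloc(sizeof *x);
  *x = 42.0;
  cos_mem_free(x);

  /* array version: all n objects must share the same cos_mem_size */
  void *buf[8];
  int got = cos_mem_alloc_n(buf, 8, 64);   /* may allocate fewer than 8 */
  cos_mem_free_n(buf, got);

  /* hand unused pooled memory of this thread back to the system */
  cos_mem_collect(0, 0);
}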
serial_unit.c
#include <stdio.h> #include <errno.h> // for errno #include <math.h> #include <limits.h> // for INT_MAX #include <stdlib.h> // for strtol #include <time.h> #include <omp.h> long max_number_of_char = 10; long number_of_types_char = 10; long number_of_results = 5; long number_of_queues = 2; typedef struct Client { long* initial_characteristics; long** derived_characteristics; long number_of_init_chars; long identifier; long result; } Client; long rand_between(long l, long r, unsigned int seed) { int value; #pragma omp critical (rand) { srand(seed); value = rand(); } return (long) (l + (value % (r - l + 1))); } Client* createClient(long id, unsigned int seed) { Client* client = malloc(sizeof(Client)); client->number_of_init_chars = rand_between(0, max_number_of_char, seed); seed += client->number_of_init_chars; if(client->number_of_init_chars > 0) { client->initial_characteristics = malloc(client->number_of_init_chars*sizeof(long)); //printf("id: %ld - init size: %ld\n", id, client->number_of_init_chars); for (long i = 0; i < client->number_of_init_chars; ++i) { client->initial_characteristics[i] = rand_between(1, number_of_types_char, seed); seed += client->initial_characteristics[i]; } if(number_of_queues - 1 > 0){ client->derived_characteristics = malloc((number_of_queues-1)*sizeof(long*)); for (long i = 0; i < number_of_queues-1; ++i) { client->derived_characteristics[i] = malloc((client->number_of_init_chars+i+1)*sizeof(long)); //printf("id: %ld - level: %ld - size: %ld\n", id, i, client->number_of_init_chars+i+1); } } } else { client->initial_characteristics = NULL; client->derived_characteristics = NULL; } client->identifier = id; client->result = -1; return client; } void destroyClient(Client* client){ if(client->number_of_init_chars > 0) { free(client->initial_characteristics); if(number_of_queues-1 > 0){ for (long i = 0; i < number_of_queues-1; ++i) { free(client->derived_characteristics[i]); } free(client->derived_characteristics); } } free(client); } void printClient(Client* client, short int detail_level) { #pragma omp critical (print) { if(detail_level > 1) { printf("\nId: %ld\n", client->identifier); printf("Result: %ld\n", client->result); if(detail_level > 1) { printf("Nº of characteristics: %ld\n", client->number_of_init_chars); if(client->number_of_init_chars > 0) { printf("Characteristics: %ld", client->initial_characteristics[0]); for (long i = 1; i < client->number_of_init_chars; ++i) { printf(", %ld", client->initial_characteristics[i]); } } for (long i = 0; i < number_of_queues - 1; ++i) { if(detail_level > i+2) { if(client->number_of_init_chars > 0) { printf("\nCharacteristics on queue %ld: %ld", i+1, client->derived_characteristics[i][0]); for (long j = 1; j < client->number_of_init_chars+i+1; ++j) { printf(", %ld", client->derived_characteristics[i][j]); } } } } } printf("\n"); } } } void printClientCSV(Client* client) { printf("\n%ld, %ld, %ld\n", client->identifier, client->result, client->number_of_init_chars); } void printCSV(long* ids, long* results, long* n_of_chars, long number_of_clients) { printf("ID,Result,Nº Initial Characteristics\n"); for (long i = 0; i < number_of_clients; ++i) { printf("%ld, %ld, %ld\n", ids[i], results[i], n_of_chars[i]); } } long initialCharProcess(Client* client) { long value = 0; for (long i = 0; i < client->number_of_init_chars; ++i) { value += client->initial_characteristics[i]; } return (value / (client->number_of_init_chars + 1)) + (value % (client->number_of_init_chars + 1)); } long levelCharProcess(Client* client, long* 
values, long level) {
    //printf("client: %ld - chars: %ld\n", client->identifier, client->number_of_init_chars);
    long* origin = NULL;
    if (level == 0) {
        origin = client->initial_characteristics;
    } else {
        origin = client->derived_characteristics[level-1];
    }
    long value = 0;
    //printf("client: %ld - level: %ld - level size: %ld\n", client->identifier, level, client->number_of_init_chars+level);
    for (long i = 0; i < client->number_of_init_chars+level-1; ++i) {
        client->derived_characteristics[level][i] = 0;
        for (long j = 0; j < client->number_of_init_chars+level; ++j) {
            client->derived_characteristics[level][i] += abs(origin[i] - origin[j]);
        }
        //printf("client: %ld - %ld: %ld\n", client->identifier,i, client->derived_characteristics[level][i]);
        value += client->derived_characteristics[level][i];
    }
    client->derived_characteristics[level][client->number_of_init_chars+level-1] = abs(origin[client->number_of_init_chars+level-1] - origin[0]);
    //printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level-1, client->derived_characteristics[level][client->number_of_init_chars+level-1]);
    client->derived_characteristics[level][client->number_of_init_chars+level] = values[level];
    //printf("client: %ld - %ld: %ld\n", client->identifier,client->number_of_init_chars+level, client->derived_characteristics[level][client->number_of_init_chars+level]);
    value += client->derived_characteristics[level][client->number_of_init_chars+level-1];
    value += client->derived_characteristics[level][client->number_of_init_chars+level];
    return (value * client->number_of_init_chars) % number_of_types_char;
}

long categoryFromValue(long* values) {
    long result = 0;
    if(values[0] == 0){
        return 0;
    }
    for (long i = 0; i < number_of_queues; ++i) {
        result += values[i] * (i+1);
    }
    result = result / number_of_queues;
    return 1 + ( result % number_of_results);
}

void categorizeClient(Client* client) {
    if(client->number_of_init_chars == 0) {
        client->result = 0;
        return;
    }
    long* values = calloc(number_of_queues, sizeof(long));
    values[0] = initialCharProcess(client);
    //printf("initial value: %ld\n", values[0]);
    for (long i = 0; i < number_of_queues-1; ++i) {
        values[i+1] += levelCharProcess(client, values, i);
        //printf("value %ld: %ld\n", i, values[i]);
    }
    client->result = categoryFromValue(values);
    free(values);
}

long convert_str_long(char *str){
    char *p;
    errno = 0;
    long conv = strtol(str, &p, 10);
    if (errno != 0 || *p != '\0') {
        printf("%s is not a number!\n", str);
        exit(-1);
    }
    return (long)conv;
}

int main(int argc, char **argv){
    if (argc != 8) {
        printf("The following arguments must be provided:\n");
        printf("Seed used to generate the data\n");
        printf("Number of clients to create\n");
        printf("Maximum number of characteristics per client\n");
        printf("Number of characteristic types\n");
        printf("Number of results other than 0\n");
        printf("Number of stages used to process the result\n");
        printf("Detail level for displaying the results:\n");
        printf(" - If 0: prints only the elapsed time\n");
        printf(" - If 1: prints the Id, the Result and the number of categories of each client as a .csv\n");
        printf(" - If n: prints each client's data, detailing the characteristics of up to n-1 stages, followed by the detail-1 data\n");
        return -1;
    }
    unsigned int seed = convert_str_long(argv[1]);
    long number_of_clients = convert_str_long(argv[2]);
    max_number_of_char = convert_str_long(argv[3]);
    number_of_types_char = convert_str_long(argv[4]);
    number_of_results =
convert_str_long(argv[5]); number_of_queues = convert_str_long(argv[6]); long detail_level = convert_str_long(argv[7]); long* ids = malloc(number_of_clients*sizeof(long)); long* results = malloc(number_of_clients*sizeof(long)); long* n_of_chars = malloc(number_of_clients*sizeof(long)); Client** clients = malloc(number_of_clients*sizeof(Client*)); for (long i = 0; i < number_of_clients; ++i) { clients[i] = createClient(i, seed+i); } double t = omp_get_wtime(); for (long i = 0; i < number_of_clients; ++i) { categorizeClient(clients[i]); ids[i] = clients[i] ->identifier; results[i] = clients[i] ->result; n_of_chars[i] = clients[i] ->number_of_init_chars; printClient(clients[i] , detail_level); } t = omp_get_wtime() - t; for (long i = 0; i < number_of_clients; ++i) { destroyClient(clients[i]); } if(detail_level > 0) { printCSV(ids, results, n_of_chars, number_of_clients); } else { printf("%.10lf\n", t); } free(clients); free(ids); free(results); free(n_of_chars); return 0; } /* main */
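/* Example invocation (a sketch; the compiler flags are assumptions, the
 * argument order follows the usage text in main above):
 *
 *   gcc -O2 -fopenmp serial_unit.c -o serial_unit -lm
 *   ./serial_unit 42 1000 10 10 5 2 0
 *
 * i.e. seed 42, 1000 clients, at most 10 characteristics drawn from 10
 * types, 5 nonzero results, 2 processing stages, detail level 0 (print
 * only the elapsed time). */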
num_diff.h
#if !defined(__num_diff_h) #define __num_diff_h /* Library for numerical differentiation. Derivative in j direction: partial_j f'(x) = 1/h_j sum_{i=1}^n a_i f(x + b_i*h_j e_j) + A h_j^p f^{(p+1)}(c) direction: x in T^d e_j = ( delta_{kj} )_{k=1}^d Ref: * http://en.wikipedia.org/wiki/Numerical_differentiation * http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/smooth-low-noise-differentiators/ * http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/lanczos-low-noise-differentiators Author: Martin Horvat, November 2013 */ #include <cmath> #include <vector> #include <limits> //#define DEBUG enum Tnum_diff { central_3 = 0, central_5, central_7, central_9 }; template <class T> struct Tnum_diff_scheme { int n, p; T A; std::vector<T> a, b; }; /* Finite difference coefficient: Ref: * http://en.wikipedia.org/wiki/Finite_difference_coefficients * Fornberg, Bengt (1988), "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51 (184): 699–706, doi:10.1090/S0025-5718-1988-0935077-0, ISSN 0025-5718 * http://www.holoborodko.com/pavel/numerical-methods/numerical-derivative/central-differences/ */ Tnum_diff_scheme<double> const num_diff_schemes_d [] = { // approximated via 3 point central differences, error: O(h^2) // Tnum_diff type = central_3 { 2, 2, -1./6, {-1./2, 1./2}, {-1, 1} }, // approximated via 5 point central differences, error: O(h^4) // Tnum_diff type = central_5 { 4, 4, +1./30, {1./12, -8./12, 8./12, -1./12}, {-2, -1, 1, 2} }, // approximated via 7 point central differences, error: O(h^6) // Tnum_diff type = central_7 { 6, 6, -1./140, {-1./60, +9./60, -45./60, +45./60, -9./60, +1./60}, {-3, -2, -1, 1, 2, 3} }, // approximated via 9 point central differences, error: O(h^8) // Tnum_diff type = central_9 { 8, 8, 1./630, { 3./840, -32./840, 168./840, -672./840, 672./840, -168./840, 32./840, -3./840}, {-4, -3, -2, -1, 1, 2, 3, 4} } }; void num_diff_set_scheme(const Tnum_diff &type, int &n, double *&a, double *&b){ n = num_diff_schemes_d[type].n; a = const_cast<double *> (num_diff_schemes_d[type].a.data()); b = const_cast<double *> (num_diff_schemes_d[type].b.data()); } void num_diff_set_scheme(const Tnum_diff &type, int &n, int &p, double &A, double *&a, double *&b){ n = num_diff_schemes_d[type].n; p = num_diff_schemes_d[type].p; A = num_diff_schemes_d[type].A; a = const_cast<double *> (num_diff_schemes_d[type].a.data()); b = const_cast<double *> (num_diff_schemes_d[type].b.data()); } Tnum_diff_scheme<long double> const num_diff_schemes_l [] = { // approximated via 3 point central differences, error: O(h^2) // Tnum_diff type = central_3 { 2, 2, -1.L/6, {-1.L/2, 1.L/2}, {-1L, 1L} }, // approximated via 5 point central differences, error: O(h^4) // Tnum_diff type = central_5 { 4, 4, 1.L/30, {1.L/12, -8.L/12, 8.L/12, -1.L/12}, {-2L, -1L, 1L, 2L} }, // approximated via 7 point central differences, error: O(h^6) // Tnum_diff type = central_7 { 6, 6, -1.L/140, {-1.L/60, +9.L/60, -45.L/60, +45.L/60, -9.L/60, +1.L/60}, {-3L, -2L, -1L, 1L, 2L, 3L} }, // approximated via 9 point central differences, error: O(h^8) // Tnum_diff type = central_9 { 8, 8, 1.L/630, { 3.L/840, -32.L/840, 168.L/840, -672.L/840, 672.L/840, -168.L/840, 32.L/840, -3.L/840}, {-4L, -3L, -2L, -1L, 1L, 2L, 3L, 4L} } }; void num_diff_set_scheme(const Tnum_diff &type, int &n, long double *&a, long double *&b){ n = num_diff_schemes_l[type].n; a = const_cast<long double *> (num_diff_schemes_l[type].a.data()); b = const_cast<long double *> 
(num_diff_schemes_l[type].b.data()); } void num_diff_set_scheme(const Tnum_diff &type, int &n, int &p, long double &A, long double *&a, long double *&b){ n = num_diff_schemes_l[type].n; p = num_diff_schemes_l[type].p; A = num_diff_schemes_l[type].A; a = const_cast<long double *> (num_diff_schemes_l[type].a.data()); b = const_cast<long double *> (num_diff_schemes_l[type].b.data()); } #if defined(_QUAD) Tnum_diff_scheme<quad> const num_diff_schemes_q [] = { // approximated via 3 point central differences, error: O(h^2) // Tnum_diff type = central_3 { 2, 2, -1.Q/6, {-1.Q/2, 1.Q/2}, {-1.Q, 1.Q} }, // approximated via 5 point central differences, error: O(h^4) // Tnum_diff type = central_5 { 4, 4, 1.Q/30, {1.Q/12, -8.Q/12, 8.Q/12, -1.Q/12}, {-2.Q, -1.Q, 1.Q, 2.Q} }, // approximated via 7 point central differences, error: O(h^6) // Tnum_diff type = central_7 { 6, 6, -1.Q/140, {-1.Q/60, +9.Q/60, -45.Q/60, +45.Q/60, -9.Q/60, +1.Q/60}, {-3.Q, -2.Q, -1.Q, 1.Q, 2.Q, 3.Q} }, // approximated via 9 point central differences, error: O(h^8) // Tnum_diff type = central_9 { 8, 8, 1.Q/630, { 3.Q/840, -32.Q/840, 168.Q/840, -672.Q/840, 672.Q/840, -168.Q/840, 32.Q/840, -3.Q/840}, {-4.Q, -3.Q, -2.Q, -1.Q, 1.Q, 2.Q, 3.Q, 4.Q} } }; void num_diff_set_scheme(const Tnum_diff &type, int &n, quad *&a, quad *&b){ n = num_diff_schemes_q[type].n; a = const_cast<quad *> (num_diff_schemes_q[type].a.data()); b = const_cast<quad *> (num_diff_schemes_q[type].b.data()); } void num_diff_set_scheme(const Tnum_diff &type, int &n, int &p, quad &A, quad *&a, quad *&b){ n = num_diff_schemes_q[type].n; p = num_diff_schemes_q[type].p; A = num_diff_schemes_q[type].A; a = const_cast<quad *> (num_diff_schemes_q[type].a.data()); b = const_cast<quad *> (num_diff_schemes_q[type].b.data()); } #endif /* Routine for numerical differentiation. Serial code. Input: type in Tnum_diff d -- dimension of domain h[d] -- step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: df[d] - derivative */ template <class T> void num_diff( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x), T *df){ int n; T *a, *b; num_diff_set_scheme(type, n, a, b); int i, j, k; T *y = new T [d], sum; for (i = 0; i < d; ++i){ sum = 0; for (k = 0; k < n; ++k) { for (j = 0; j < d; ++j) y[j] = x[j]; y[i] += b[k]*h[i]; sum += a[k]*func(y); } df[i] = sum/h[i]; } delete [] y; } /* Routine for numerical differentiation. Parallelized first by dimension and then by function calls. Nested OpenMP. 
Input: type in Tnum_diff d -- dimension of domain h[d] -- step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: df[d] - derivative */ template <class T> void num_diff_omp( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x), T *df){ int n; T *a, *b; num_diff_set_scheme(type, n, a, b); #pragma omp parallel for shared(x, a, b, df) for (int i = 0; i < d; ++i){ T sum = 0; #if defined(DEBUG) std::cerr << "num_diff::i=" << i << std::endl; #if defined(_OPENMP) std::cerr << "num_diff::OpenMP L1 id=" << omp_get_thread_num() << std::endl; #endif #endif #pragma omp parallel for shared(x, a, b) reduction(+:sum) for (int k = 0; k < n; ++k) { #if defined(DEBUG) std::cerr << "num_diff::i,k=" << i << "," << k << std::endl; #if defined(_OPENMP) std::cerr << "num_diff::OpenMP L2 id=" << omp_get_thread_num() << std::endl; #endif #endif T *y = new T [d]; for (int j = 0; j < d; ++j) y[j] = x[j]; y[i] += b[k]*h[i]; sum += a[k]*func(y); delete [] y; } df[i] = sum/h[i]; } } /* Routine for numerical differentiation. Openmp parallelization of only function evaluation without nesting. Input: type in Tnum_diff d -- dimension of domain h[d] -- step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: df[d] - derivative */ template <class T> void num_diff_omp_f( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x), T *df){ int n; T *a, *b; num_diff_set_scheme(type, n, a, b); int i, j, k, len = n*d; T *f = new T [len], *y = new T [len*d], *p = y; for (i = 0; i < d; ++i) for (j = 0; j < n; ++j) { for (k = 0; k < d; ++k) p[k] = x[k]; p[i] += b[j]*h[i]; p += d; } #pragma omp parallel for shared(y,f) private(i) for (i = 0; i < len; ++i) f[i] = func(y + i*d); T sum; p = f; for (i = 0; i < d; ++i) { sum = 0; for (j = 0; j < n; ++j) sum += a[j]*p[j]; df[i] = sum/h[i]; p += n; } delete [] f; delete [] y; } /* Routine for numerical differentiation. Using Kahan summation to reduce rounding errors. Input: type in Tnum_diff d -- dimension of domain h[d] -- step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: df[d] - derivative */ template <class T> void num_diff_kahan( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x), T *df){ int n; T *a, *b; num_diff_set_scheme(type, n, a, b); #pragma omp parallel for shared(x, a, b, df) for (int i = 0; i < d; ++i){ T sum = 0, // sum c = 0, // correction sum_, // partial sum t, // term of summation *y = new T [d]; #if defined(DEBUG) std::cerr << "num_diff_kahan::i=" << i << std::endl; #if defined(_OPENMP) std::cerr << "num_diff_kahan::OpenMP L1 id=" << omp_get_thread_num() << std::endl; #endif #endif for (int k = 0; k < n; ++k) { // parallelize !!! for (int j = 0; j < d; ++j) y[j] = x[j]; y[i] += b[k]*h[i]; t = a[k]*func(y) - c; sum_ = sum + t; c = static_cast<T>(sum_ - sum) - t; sum = sum_; } df[i] = sum/h[i]; delete [] y; } } /* Estimate optimal step size in all dimension. Serial code rewritten. Using OpenMP nested if enabled. 
The upper bound of the differential error is given by | Df(x) - f'(x) | <= |S|/h + |A| h^p |f^{p+1}| ^ ^ numerical exact with summation error S propto eps_0 where eps_0 is machine precision Meaning | Df(x) - f'(x) | <= eps_0 |B| /h + |A| h^p |f^{p+1}| Input: type in Tnum_diff d -- dimension of domain h[d] -- initial step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: h[d] -- optimal step sizes in each dimension */ template <class T> void num_diff_estimate_step( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x)){ int n, p; T A, *a, *b, eps = std::numeric_limits<T>::epsilon(); num_diff_set_scheme(type, n, p, A, a, b); int i, j, k, r = p + 1; T dS, *y = new T [d], *f = new T [r + 1], t, fmax; for (i = 0; i < d; ++i) { // Estimate rounding error of the scheme // Ref: Higham, 1993 dS = 0; for (j = 0; j < n; ++j) { for (k = 0; k < d; ++k) y[k] = x[k]; y[i] += b[j]*h[i]; dS += std::abs(a[j]*func(y)); } dS *= eps*(n - 1); // sequential summation // dS *= 2*eps; // Kahan summation // Estimate r = p+1-th derivative using central differences: // // f^{(r)}(x) = h^{-r} delta_h^r[f](x) + O(h^2) // // with // delta_h^r[f](x) = sum_{i=0}^r (-1)^i Binom(r,i) f(x + (r/2-i)h) // // delta_h[f](x) = f(x+h/2) - f(x-h/2) // Ref: // http://en.wikipedia.org/wiki/Finite_difference for (j = 0; j <= r; ++j) { for (k = 0; k < d; ++k) y[k] = x[k]; y[i] += (T(r)/2 - j)*h[i]; f[j] = func(y); } fmax = std::abs(f[0]); for (j = 1; j <= r; ++j) { t = std::abs(f[j]); if (fmax < t) fmax = t; } // divided differences scheme for (j = 0; j < r; ++j) for (k = 0; k < r-j; ++k) f[k] -= f[k + 1]; if (std::abs(f[0]) < r*(r+1)*fmax*eps/2) { h[i] = std::abs(dS/(fmax*eps)); // dS/h ~ fmax*eps = abs. error of f } else { h[i] *= exp(log(std::abs(dS/(p*A*f[0])))/r); // min dS/h + A f^(r)/h^p } } delete [] y; delete [] f; } /* Estimate optimal step size in all dimension. Serial code rewritten. Using OpenMP nested if enabled. 
The upper bound of the differential error is given by | Df(x) - f'(x) | <= |S|/h + |A| h^p |f^{p+1}| ^ ^ numerical exact with summation error S propto eps_0 where eps_0 is machine precision Meaning | Df(x) - f'(x) | <= eps_0 |B| /h + |A| h^p |f^{p+1}| Input: type in Tnum_diff d -- dimension of domain h[d] -- initial step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: h[d] -- optimal step sizes in each dimension */ template <class T> void num_diff_estimate_step_omp( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x)){ int n, p; T A, *a, *b, eps = std::numeric_limits<T>::epsilon(); num_diff_set_scheme(type, n, p, A, a, b); int r = p + 1; #pragma omp parallel for shared(x, a, b, h) for (int i = 0; i < d; ++i) { #if defined(DEBUG) std::cerr << "num_diff_estimate_step::i=" << i << std::endl; #if defined(_OPENMP) std::cerr << "num_diff_estimate_step::OpenMP L2 id=" << omp_get_thread_num() << std::endl; #endif #endif // Estimate rounding error of the scheme // Ref: Higham, 1993 T dS = 0; #if defined(DEBUG) std::cerr << "num_diff_estimate_step::estimate rounding error" << std::endl; #endif #pragma omp parallel for shared(x, a, b, h) reduction(+:dS) for (int j = 0; j < n; ++j) { #if defined(DEBUG) std::cerr << "num_diff_estimate_step::i,j=" << i << "," << j << std::endl; #if defined(_OPENMP) std::cerr << "num_diff_estimate_step::OpenMP L2 id=" << omp_get_thread_num() << std::endl; #endif #endif T *y = new T [d]; for (int k = 0; k < d; ++k) y[k] = x[k]; y[i] += b[j]*h[i]; dS += std::abs(a[j]*func(y)); delete [] y; } dS *= eps*(n - 1); // sequential summation // dS *= 2*eps; // Kahan summation // Estimate r = p+1-th derivative using central differences: // // f^{(r)}(x) = h^{-r} delta_h^r[f](x) + O(h^2) // // with // delta_h^r[f](x) = sum_{i=0}^r (-1)^i Binom(r,i) f(x + (r/2-i)h) // // delta_h[f](x) = f(x+h/2) - f(x-h/2) // Ref: // http://en.wikipedia.org/wiki/Finite_difference T *f = new T [r + 1]; #if defined(DEBUG) std::cerr << "num_diff_estimate_step::estimate f^{(p+1)}" << std::endl; #endif #pragma omp parallel for shared(x, a, b, f, h) for (int j = 0; j <= r; ++j) { #if defined(DEBUG) std::cerr << "num_diff_estimate_step::i,j=" << i << "," << j << std::endl; #if defined(_OPENMP) std::cerr << "num_diff_estimate_step::OpenMP L2 id=" << omp_get_thread_num() << std::endl; #endif #endif T *y = new T [d]; for (int k = 0; k < d; ++k) y[k] = x[k]; y[i] += (T(r)/2 - j)*h[i]; f[j] = func(y); delete [] y; } T t, fmax = std::abs(f[0]); for (int j = 1; j <= r; ++j) { t = std::abs(f[j]); if (fmax < t) fmax = t; } // divided differences scheme for (int j = 0; j < r; ++j) for (int k = 0; k < r-j; ++k) f[k] -= f[k + 1]; /* std::cerr << "#dS=" << dS << " A=" << A << " f[0]=" << f[0] << " df=" << f[0]/exp(log(h[i])*r) << '\n'; */ if (std::abs(f[0]) < r*(r+1)*fmax*eps/2) { h[i] = std::abs(dS/(fmax*eps)); // dS/h ~ fmax*eps = abs. error of f } else { h[i] *= exp(log(std::abs(dS/(p*A*f[0])))/r); // min dS/h + A f^(r)/h^p } delete [] f; } } /* Estimate optimal step size in all dimension. Intended for use with OpenMP without nesting, if enabled. 
The upper bound of the differential error is given by | Df(x) - f'(x) | <= |S|/h + |A| h^p |f^{p+1}| ^ ^ numerical exact with summation error S propto eps_0 where eps_0 is machine precision Meaning | Df(x) - f'(x) | <= eps_0 |B| /h + |A| h^p |f^{p+1}| Input: type in Tnum_diff d -- dimension of domain h[d] -- initial step sizes in each dimension x[d] -- point at which derivative is calculated T func(T *x) -- function which is differentiated Output: h[d] -- optimal step sizes in each dimension */ template <class T> void num_diff_estimate_step_omp_f( const Tnum_diff &type, const int &d, T *h, T *x, T func(T *x)){ int n, p; T A, *a, *b, eps = std::numeric_limits<T>::epsilon(); num_diff_set_scheme(type, n, p, A, a, b); // Estimate rounding error of the scheme // Ref: Higham, 1993 int i, j, k, len = d*n; T *y = new T [len*d], *q = y, tmp; for (i = 0; i < d; ++i) for (j = 0; j < n; ++j) { for (k = 0; k < d; ++k) q[k] = x[k]; q[i] += b[j]*h[i]; q += d; } T *f = new T [len]; #pragma omp parallel for shared(y, f) private(i) for (i = 0; i < len; ++i) f[i] = func(y + i*d); delete [] y; q = f; T *dS = new T [d]; for (i = 0; i < d; ++i) { tmp = 0; for (j = 0; j < n; ++j) tmp += std::abs(a[j]*q[j]); dS[i] = eps*(n - 1)*tmp; // sequential summation // dS[i] *= 2*eps; // Kahan summation q += n; } delete [] f; // Estimate r = p+1-th derivative using central differences: // // f^{(r)}(x) = h^{-r} delta_h^r[f](x) + O(h^2) // // with // delta_h^r[f](x) = sum_{i=0}^r (-1)^i Binom(r,i) f(x + (r/2-i)h) // // delta_h[f](x) = f(x+h/2) - f(x-h/2) // // and calculate optimal step size // // // Ref: // http://en.wikipedia.org/wiki/Finite_difference int r = p + 1; len = d*(r + 1); y = new T [len*d]; f = new T [len]; q = y; for (i = 0; i < d; ++i) for (j = 0; j <= r; ++j) { for (k = 0; k < d; ++k) q[k] = x[k]; q[i] += (T(r)/2 - j)*h[i]; q += d; } #pragma omp parallel for shared(y, f) private(i) for (i = 0; i < len; ++i) f[i] = func(y + i*d); delete [] y; T fmax, thresh; q = f; for (i = 0; i < d; ++i) { fmax = std::abs(q[0]); for (j = 1; j <= r; ++j) { tmp = std::abs(q[j]); if (fmax < tmp) fmax = tmp; } // divided differences scheme for (j = 0; j < r; ++j) for (k = 0; k < r - j; ++k) q[k] -= q[k + 1]; thresh = fmax*r*(r+1)*eps/2; // threshold for computing r+1 // central difference if (std::abs(q[0]) < thresh) h[i] = std::abs(dS[i]/(fmax*eps)); // dS/h ~ fmax*eps = abs. error of f else h[i] *= exp(log(std::abs(dS[i]/(p*A*q[0])))/r); // min dS/h + A f^(r)/h^p q += r + 1; } delete [] dS; delete [] f; } #endif
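/* Usage sketch (illustrative, not part of the header): differentiate
 * f(x0,x1) = sin(x0) * exp(x1) with the 5-point central scheme and compare
 * against the analytic gradient. Assumes this header and <cstdio> are
 * included; the step sizes h are first tuned with num_diff_estimate_step.
 *
 *     double f(double *x) { return sin(x[0]) * exp(x[1]); }
 *
 *     int main() {
 *       double x[2] = {0.3, -0.2}, h[2] = {1e-3, 1e-3}, df[2];
 *       num_diff_estimate_step(central_5, 2, h, x, f); // optimal h per dimension
 *       num_diff(central_5, 2, h, x, f, df);
 *       std::printf("df0 = %.12g (exact %.12g)\n", df[0], cos(x[0]) * exp(x[1]));
 *       std::printf("df1 = %.12g (exact %.12g)\n", df[1], sin(x[0]) * exp(x[1]));
 *       return 0;
 *     }
 */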
GB_binop__bor_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_08__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_04__bor_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bor_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bor_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_int64) // C=scalar+B GB (_bind1st__bor_int64) // C=scalar+B' GB (_bind1st_tran__bor_int64) // C=A+scalar GB (_bind2nd__bor_int64) // C=A'+scalar GB (_bind2nd_tran__bor_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) | (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT64 || GxB_NO_BOR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
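// Bitwise OR is not one of those accumulators, so this dense ewise3-accum
// kernel is compiled out; its entry in the table above is "(none)".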
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bor_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
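/* Illustration (not generated code): stripped of the GBB/GBX bitmap and iso
 * machinery, the bind2nd kernel above reduces to an elementwise bitwise OR
 * of a matrix's values with a single scalar:
 *
 *     #include <stdint.h>
 *     static void bor_bind2nd_plain (int64_t *Cx, const int64_t *Ax,
 *                                    int64_t y, int64_t anz)
 *     {
 *         for (int64_t p = 0 ; p < anz ; p++)
 *         {
 *             Cx [p] = Ax [p] | y ;   // cij = aij | y
 *         }
 *     }
 *
 * The real kernel additionally skips entries absent from the bitmap (Ab/Bb)
 * and reads a single value when the input matrix is iso-valued.
 */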
shallow_water_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Miguel Maso Sotomayor // #ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED #define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED // System includes // External includes // Project includes #include "includes/node.h" namespace Kratos { ///@addtogroup ShallowWaterApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ class ModelPart; // forward declaration /** * @ingroup ShallowWaterApplication * @class ShallowWaterUtilities * @brief This class is a wrapper of useful utilities for shallow water computations */ class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities { public: ///@name Type Definitions ///@{ /// Pointer definition of ShallowWaterUtilities KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities); typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. /// Destructor. ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void ComputeFreeSurfaceElevation(ModelPart& rModelPart); void ComputeHeightFromFreeSurface(ModelPart& rModelPart); void ComputeVelocity(ModelPart& rModelPart, bool PerformProjection = false); void ComputeSmoothVelocity(ModelPart& rModelPart); void ComputeMomentum(ModelPart& rModelPart); void ComputeEnergy(ModelPart& rModelPart); void ComputeAccelerations(ModelPart& rModelPart); void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart); void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag); void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0); void ResetDryDomain(ModelPart& rModelPart, double Thickness = 0.0); template<class TContainerType> void CopyFlag(Flags OriginFlag, Flags DestinationFlag, TContainerType& rContainer) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rContainer.size()); ++i) { auto it = rContainer.begin() + i; it->Set(DestinationFlag, it->Is(OriginFlag)); } } void NormalizeVector(ModelPart& rModelPart, Variable<array_1d<double,3>>& rVariable); template<class TVarType> void CopyVariableToPreviousTimeStep(ModelPart& rModelPart, const TVarType& rVariable) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i) { auto const it_node = rModelPart.NodesBegin() + i; it_node->FastGetSolutionStepValue(rVariable,1) = it_node->FastGetSolutionStepValue(rVariable); } } void SetMinimumValue(ModelPart& rModelPart, const Variable<double>& rVariable, double MinValue); /* * @brief This method sets the z-coordinate of the mesh to zero */ void SetMeshZCoordinateToZero(ModelPart& rModelPart); /* * @brief This method moves the z-coordinate of the mesh according to a variable */ void SetMeshZCoordinate(ModelPart& rModelPart, const Variable<double>& rVariable); ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ ///@} private: ///@name Operations ///@{ double InverseHeight(const double Height, const double Epsilon); void CalculateMassMatrix(Matrix& rMassMatrix, const GeometryType& rGeometry); ///@} }; // Class ShallowWaterUtilities ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output 
///@{ ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED defined
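/* Usage sketch (illustrative, not part of the header): assumes a Kratos
 * ModelPart named r_model_part with the shallow-water variables registered,
 * and uses the standard Kratos FLUID and ACTIVE flags as examples:
 *
 *     ShallowWaterUtilities utilities;
 *     utilities.ComputeFreeSurfaceElevation(r_model_part);
 *     utilities.IdentifyWetDomain(r_model_part, FLUID, 1e-3); // 1 mm wet threshold
 *     utilities.CopyFlag(FLUID, ACTIVE, r_model_part.Elements());
 */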
L2solverDMAp.c
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ /* GRACE/GRAIL L2 solver (MPI, MKL, OMP modules) Version: June 2014 Copyright (c) 2014 Kun Shang (shang.34@osu.edu) All Right Reserved */ /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ #include <math.h> #include <stdio.h> #include <stdlib.h> //#include <omp.h> #include "mkl.h" //#include "mkl_pblas.h" #include "mpi.h" //#include "mkl_scalapack.h" #define LAPACK_ROW_MAJOR 101 #define LAPACK_COL_MAJOR 102 #define max(A,B) ((A)>(B)?(A):(B)) #define min(A,B) ((A)>(B)?(B):(A)) const double T0 = 2451545.00000000; const double TWOPI = 6.283185307179586476925287; const double ASEC2RAD = 4.848136811095359935899141e-6; const double DEG2RAD = 0.017453292519943296; const double RAD2DEG = 57.295779513082321; void cal_date (double tjd, short int *year, short int *month, short int *day, double *hour); double julian_date (short int year, short int month, short int day, double hour); double grv_open (char *grv_name, char *label, int nmax, int mmax, double *coef); double cs2gp_pt (double *llr, double *cs, double gm, double a, int nmax, double *gpt, double *pt); double lgdr(double t, int nmax, int m, double *pbar); double zero0zero1 (double *cs, int nmax); int brinv (double *a,int n); int brank(double *a, int m, int n); void choldc(double *a, int n, double p[]); void cholsl(double *a, int n, double p[], double b[], double x[]); void solvels_chol(double *a, int n, double *y, double *x, int nocov); void solvegaus(double *a, int n, double *y, double *x); double modvect (double *v); double dotvect (double *v1, double *v2); void crsvect (double *v1, double *v2, double *v); void mt (double *a, int m, int n, double *b); void brmul (double *a, double *b, int m,int n, int k,double *c); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ int main (int argc, char *argv[]) { int ndata, ndata_all, ndata_avg, dt; double GMA[2]; FILE *fp_stdin, *fpin_l1c, *fpout_coef; int i, j, n, nmax, mmax, slvls, l_old, ndat_max, factor_b1, factor_b2, m, l, k, ind, num, *num_arc, *gps_arc, narc_max, *ind_m, LFIX = 0, ifix[4000], LSTK = 0, istk[4000], nply, ncpr, nemp, ndt, narc, npd, *gpst, *gps_eph, solvefor, solvegrv, solveemp; short int year, month, day; double *tp1, *csr, *gfz, *jpl, std, alpha, *yi, GMR2, r2R, hour, JD0, t, Tprd, *pt1, *pt2, *ai, *coefaam, factor_n, *lambda, *ksiupd, *k0, *aksi, *l1cf, *factor_a, *factor_b, factor_p, factor_r, *ksi, *dksi, *dks,*dksiupd, r0[1], sigma0, pweight, *p0, *p0b0, *knk0, *knk, *kksi, *id, *kfix, *kfixt,*kn, *nk, *nkknk, *atpa, *atpy, *atpai, *atpyi, *atpa_one, *cest, omega, etpe, c, s, vfix[4000], vstk[4000], *llr1, *llr2, *l1c_eph, *l1ct, vb1, vb2; char line[500], card[20], f_aam[2][200]={"GIF48.GEO", "\0"}, stdname[200], l1cfile[400], l2file[400]; time_t s0, s1, s2, s3, s4, s5, s6; long nl, solveforN, solvegrvN, solvefor2, is, in; double tbeg, tend; int myrank = 0, root_process = 0, nprocs = 1, ierr, ndata_s, ndata_e, npara_s, npara_e, npara_all, npara_avg, npara; int i_one = 1, i_negone = -1, i_zero = 0; double zero=0.0E+0, one=1.0E+0; int nprow, npcol, context, ictxt, myrow, mycol, nb, lld, lld_distr, mp, nq, info, desc_atpa[9], desc_atpy[9], desc_ksi[9], desc_ai[9], desc_yi[9]; double int_lim, mem_lim; ierr = MPI_Init(&argc, &argv); ierr = MPI_Comm_rank(MPI_COMM_WORLD, &myrank); ierr = MPI_Comm_size(MPI_COMM_WORLD, &nprocs); nprow = 1; npcol = nprocs; blacs_get_( &i_negone, &i_zero, &ictxt ); blacs_gridinit_( &ictxt, "R", &nprow, &npcol ); 
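/* The BLACS process grid is deliberately a single row (nprow = 1,
   npcol = nprocs): every distributed matrix below is split by column
   blocks only, so each rank stores full columns of ai and atpa, which
   matches the descinit_ calls further down. */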
blacs_gridinfo_( &ictxt, &nprow, &npcol, &myrow, &mycol ); printf ("(%4d) nprocs = %4d nprow = %4d npcol = %4d myrow = %4d mycol = %4d\n", myrank, nprocs, nprow, npcol, myrow, mycol); if (argc != 3) { printf ("usage: L2solver l1cfile nmax\n"); exit(0); } else { sscanf (argv[1], "%s", l1cfile); sscanf (argv[2], "%d", &nmax); } if ( (fpin_l1c = fopen (l1cfile,"r")) == NULL) { printf ("Cannot open fpout_a file!\n"); exit (0); } s0 = time(NULL); dt = 5; /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ // distribute ndata_all ndata_all = 0; while (1) { if (fgets(line,400, fpin_l1c) ==NULL) break; ndata_all ++; } rewind(fpin_l1c); ndata_avg = (int)(ndata_all / nprocs) + 1; mp = numroc_( &ndata_all, &ndata_avg, &myrank, &i_zero, &nprocs ); ndata_s = myrank * ndata_avg; ndata_e = (myrank + 1) * ndata_avg - 1; if (ndata_e > ndata_all - 1) ndata_e = ndata_all - 1; ndata = ndata_e - ndata_s + 1; if (ndata < 0) ndata = 0; printf("(%4d) ndata_all = %d ndata_avg = %d ndata = %d ndata_s = %d ndata_e = %d mp = %d\n", myrank, ndata_all, ndata_avg, ndata, ndata_s, ndata_e, mp); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ // distribute solvefor mmax = nmax; npara_all = (nmax + 1) * (nmax + 1); npara_avg = (int)(npara_all / nprocs) + 1; npara_s = myrank * npara_avg; npara_e = (myrank + 1) * npara_avg - 1; if (npara_e > npara_all - 1) npara_e = npara_all - 1; npara = npara_e - npara_s + 1; if (npara < 0) npara = 0; printf("(%4d) nmax = %d npara_all = %d npara_avg = %d npara = %d npara_s = %d npara_e = %d\n", myrank, nmax, npara_all, npara_avg, npara, npara_s, npara_e); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ // index and memeory check int_lim = (double)npara_all * (double)ndata /1024.0/1024.0/1024.0; mem_lim = ((double)npara_all * (double)(ndata + npara) + (double) ndata_all * 10) / 1024.0/1024.0/1024.0 * 8; printf("(%4d) index check: ", myrank); if (int_lim > 2) printf ("int_lim ~= %fG > INT_LIM (2G) : warning!!!\n", int_lim); else printf ("int_lim ~= %fG < INT_LIM (2G) : safe~~~\n", int_lim); printf("(%4d) memory check: ", myrank); if (mem_lim > 4) printf ("mem_lim ~= %fG > MEM_LIM (4G) : warning!!!\n", mem_lim); else printf ("mem_lim ~= %fG < MEM_LIM (4G) : safe~~~\n", mem_lim); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ // calloc local ndata l1c_eph = (double *) calloc (10 * ndata_all, sizeof(double)); llr1 = (double *) calloc (3 * ndata, sizeof(double)); llr2 = (double *) calloc (3 * ndata, sizeof(double)); yi = (double *) calloc ( ndata, sizeof(double)); /* yi is actually distributed into local, but atpy can only stay in procs = 0*/ descinit_( desc_yi, &i_one, &ndata_all, &i_one, &ndata, &i_zero, &i_zero, &ictxt, &i_one, &info ); i = 0; while (1) { if (fgets(line,400, fpin_l1c) ==NULL) break; sscanf (line, "%*d%lf%lf%lf%lf%lf%lf%lf%lf%lf%lf", &l1c_eph[i * 10], &l1c_eph[i * 10 + 1], &l1c_eph[i * 10 + 2], &l1c_eph[i * 10 + 3], &l1c_eph[i * 10 + 4], &l1c_eph[i * 10 + 5], &l1c_eph[i * 10 + 6], &l1c_eph[i * 10 + 7], &l1c_eph[i * 10 + 8], &l1c_eph[i * 10 + 9]); i ++; } if (i != ndata_all) { printf ("(%4d) ndata_all (%d) != i (%d)\n", myrank, ndata_all, i); free (l1c_eph); fclose(fpin_l1c); exit(0); } fclose(fpin_l1c); n = 0; for (i = ndata_s; i <= ndata_e; i ++) { llr1[n * 3] = l1c_eph[i * 10]; llr1[n * 3 + 1] = l1c_eph[i * 10 + 1]; llr1[n * 3 + 2] = l1c_eph[i * 10 + 2]; llr2[n * 3] = l1c_eph[i * 10 + 3]; llr2[n * 3 + 1] = l1c_eph[i * 10 + 4]; llr2[n * 3 + 2] = 
l1c_eph[i * 10 + 5]; yi[n] = l1c_eph[i * 10 + 6]; // csr[n] = l1c_eph[i * 10 + 7]; // gfz[n] = l1c_eph[i * 10 + 8]; // jpl[n] = l1c_eph[i * 10 + 9]; n++; } if (n != ndata) { printf ("(%4d) ndata (%d) != n (%d)\n", myrank, ndata, i); free (l1c_eph); exit(0); } free (l1c_eph); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ // calloc local ai, atpa /* int m_ai = numroc_( &npara_all, &npara_all, &myrow, &i_zero, &nprow ); int n_ai = numroc_( &ndata_all, &ndata_avg, &mycol, &i_zero, &npcol ); printf ("(%4d) m_ai = %d n_ai = %d npara_all = %d ndata = %d\n", myrank, m_ai, n_ai, npara_all, ndata); int m_atpa = numroc_( &npara_all, &npara_all, &myrow, &i_zero, &nprow ); int n_atpa = numroc_( &npara_all, &npara_avg, &mycol, &i_zero, &npcol ); printf ("(%4d) m_atpa = %d n_atpa = %d npara_all = %d npara = %d\n", myrank, m_atpa, n_atpa, npara_all, npara); int m_atpy = numroc_( &npara_all, &npara_all, &myrow, &i_zero, &nprow ); int n_atpy = numroc_( &i_one, &npara_avg, &mycol, &i_zero, &npcol ); printf ("(%4d) m_atpy = %d n_atpy = %d npara_all = %d \n", myrank, m_atpy, n_atpy, npara_all); descinit_( desc_ai, &npara_all, &ndata_all, &npara_all, &ndata, &i_zero, &i_zero, &ictxt, &npara_all, &info ); ai = (double *) calloc ( m_ai * n_ai, sizeof(double)); descinit_( desc_atpa, &npara_all, &npara_all, &npara_avg, &npara_avg, &i_zero, &i_zero, &ictxt, &npara_all, &info ); atpa = (double *) calloc ( m_atpa * n_atpa, sizeof(double)); descinit_( desc_atpy, &npara_all, &i_one, &npara_avg, &npara_avg, &i_zero, &i_zero, &ictxt, &npara_all, &info ); atpy = (double *) calloc ( m_atpy * n_atpy, sizeof(double)); */ descinit_( desc_ai, &npara_all, &ndata_all, &npara_all, &ndata, &i_zero, &i_zero, &ictxt, &npara_all, &info ); ai = (double *) calloc ( npara_all * ndata, sizeof(double)); descinit_( desc_atpa, &npara_all, &npara_all, &npara_avg, &npara_avg, &i_zero, &i_zero, &ictxt, &npara_all, &info ); atpa = (double *) calloc ( npara_all * npara, sizeof(double)); descinit_( desc_atpy, &npara_all, &i_one, &npara_avg, &npara_avg, &i_zero, &i_zero, &ictxt, &npara_all, &info ); atpy = (double *) calloc ( npara_all, sizeof(double)); /* atpy is distributed local matrix, but in this case, only stays in (0,0), i.e. procs = 0 */ descinit_( desc_ksi, &npara_all, &i_one, &npara_all, &i_one, &i_zero, &i_zero, &ictxt, &npara_all, &info ); ksi = (double *) calloc ( npara_all, sizeof(double)); dks = (double *) calloc ( npara_all, sizeof(double)); /* ksi and dksi are global matrix, only stays in (0,0), i.e. 
procs = 0 */ dksi = (double *) calloc ( npara_all, sizeof(double)); /* for (i = 0; i < 9; i ++) printf ("(%4d) ai[%d] = %9d atpa[%d] = %9d atpy[%d] = %9d yi[%d] = %9d ksi[%d] = %9d\n", myrank, i, desc_ai[i], i, desc_atpa[i], i, desc_atpy[i], i, desc_yi[i], i, desc_ksi[i]); */ coefaam = (double *) calloc (npara_all, sizeof(double)); GMA[0] = 398600.44150E+09; GMA[1] = 6378136.3; grv_open (f_aam[0], f_aam[1], nmax, mmax, coefaam); zero0zero1 (coefaam, nmax); s1 = time(NULL); printf("(%4d) %5ld: seconds of initialization and scan data\n", myrank, s1-s0); fflush(stdout); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ pt1 = (double *) calloc ( npara_all, sizeof(double)); pt2 = (double *) calloc ( npara_all, sizeof(double)); for (i = 0; i < ndata; i ++) { cs2gp_pt (&llr1[i * 3], coefaam, GMA[0], GMA[1], nmax, &vb1, pt1); cs2gp_pt (&llr2[i * 3], coefaam, GMA[0], GMA[1], nmax, &vb2, pt2); //////////////////////////////////////// for(n = 0; n < npara_all; n++) { ai[i * npara_all + n] = pt2[n] - pt1[n]; } } free (llr1); free (llr2); free (pt1); free (pt2); s2 = time(NULL); printf("(%4d) %5ld: seconds of processing gravity and partial\n", myrank, s2-s1); fflush(stdout); MPI_Barrier (MPI_COMM_WORLD); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ std = 0.8e-3; pweight = 1.0/std/std; alpha = 1.0; alpha = pweight; pdsyrk_ ("L", "N", &npara_all, &ndata_all, &alpha, ai, &i_one, &i_one, desc_ai, &zero, atpa, &i_one, &i_one, desc_atpa); /* for (n=0; n<npara_all; n++) { for (m=0; m<npara_all; m++) { printf("%10.2e ", atpa[n*npara_all + m]); } printf("\n"); } printf("\n"); s3 = time(NULL); printf("\n%5ld: seconds of finish pdsyrk_\n", s3-s2); fflush(stdout); */ pdgemv_ ("N", &npara_all, &ndata_all, &alpha, ai, &i_one, &i_one, desc_ai, yi, &i_one, &i_one, desc_yi, &i_one, &zero, atpy, &i_one, &i_one, desc_atpy, &i_one); s3 = time(NULL); printf("(%4d) %5ld: seconds of finish ATPA&ATPY\n", myrank, s3-s2); fflush(stdout); free (ai); /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ pdpotrf_("L", &npara_all, atpa, &i_one, &i_one, desc_atpa, &info); pdpotrs_("L", &npara_all, &i_one, atpa, &i_one, &i_one, desc_atpa, atpy, &i_one, &i_one, desc_atpy, &info); pdpotri_("L", &npara_all, atpa, &i_one, &i_one, desc_atpa, &info); /* for (n=0; n<npara_all; n++) { for (m=0; m<npara_all; m++) { printf("%10.2e ", atpa[n*npara_all + m]); } printf("\n"); } printf("\n"); */ blacs_barrier_( &ictxt, "A"); //////////////////////////////////// // get dksi from distributed atpa /* npara_all = (nmax + 1) * (nmax + 1); npara_avg = (int)(npara_all / nprocs) + 1; npara_s = myrank * npara_avg; npara_e = (myrank + 1) * npara_avg - 1; if (npara_e > npara_all - 1) npara_e = npara_all - 1; npara = npara_e - npara_s + 1; if (npara < 0) npara = 0; printf("(%4d) nmax = %d npara_all = %d npara_avg = %d npara = %d npara_s = %d npara_e = %d\n", myrank, nmax, npara_all, npara_avg, npara, npara_s, npara_e); */ for (n = 0; n < npara; n++) { dksi[npara_s + n] = atpa[n * npara_all + (npara_s + n)]; } MPI_Reduce(dksi, dks, npara_all, MPI_DOUBLE, MPI_SUM, root_process, MPI_COMM_WORLD); /////////////////////////////// if(myrank == root_process) { /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ for (n = 0; n < npara_all; n++) ksi[n] = atpy[n]; s4 = time(NULL); printf("(%4d) %5ld: seconds of inverting N\n", myrank, s4-s3); fflush(stdout); /* for (n = 0; n < solvefor; n++) { for (m = 0; m < solvefor; m++) { // dksi[n * solvefor + m] = 
atpa[n * solvefor + m] - dksiupd[n * solvefor + m]; dksi[n * solvefor + m] = atpa[n * solvefor + m]; if (dksi[n * solvefor + m] < 0) dksi[n * solvefor + m] = 0; } } */ sigma0 = 1; sprintf (l2file, "%s.%d.DMA.L2", l1cfile, nmax); // strcat (l1cfile,".L2"); printf ("OUTPUT: %s\n", l2file); if ( (fpout_coef = fopen (l2file,"w")) == NULL) { printf ("Cannot open fpout_coef file!\n"); exit (0); } for (m = 0; m <= nmax; m ++) { l = nmax - m + 1; for (k = 0; k < l; k++) { if (m==0) { n = k + m; fprintf(fpout_coef, "%4d %4d %23.13e %23.13e %23.13e %23.13e\n", n, m, ksi[k], zero, sigma0 * sqrt(dks[k]), zero); // zero, zero); } else { ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1); n = k + m; fprintf(fpout_coef, "%4d %4d %23.13e %23.13e %23.13e %23.13e\n", n, m, ksi[ind + n - m], ksi[ind + n - m + l], sigma0 * sqrt(dks[ind + n - m]), sigma0 * sqrt(dks[ind + n - m + l])); // zero, zero); } } } s5 = time(NULL); printf("(%4d) %5ld: seconds of output data\n", myrank, s5-s4); fflush(stdout); free (ksi); free (dks); free (dksi); free (atpa); free (atpy); free (coefaam); fclose(fpout_coef); printf ("\nNormal end of ECHO!\n\npress any key to finish...\n"); } // blacs_gridexit_( &ictxt ); // blacs_exit_( &i_zero ); ierr = MPI_Finalize(); exit(0); } /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ double julian_date (short int year, short int month, short int day, double hour) { long int jd12h; double tjd; jd12h = (long) day - 32075L + 1461L * ((long) year + 4800L + ((long) month - 14L) / 12L) / 4L + 367L * ((long) month - 2L - ((long) month - 14L) / 12L * 12L) / 12L - 3L * (((long) year + 4900L + ((long) month - 14L) / 12L) / 100L) / 4L; tjd = (double) jd12h - 0.5 + hour / 24.0; return (tjd); } void cal_date (double tjd, short int *year, short int *month, short int *day, double *hour) { long int jd, k, m, n; double djd; djd = tjd + 0.5; jd = (long int) djd; *hour = fmod (djd,1.0) * 24.0; k = jd + 68569L; n = 4L * k / 146097L; k = k - (146097L * n + 3L) / 4L; m = 4000L * (k + 1L) / 1461001L; k = k - 1461L * m / 4L + 31L; *month = (short int) (80L * k / 2447L); *day = (short int) (k - 2447L * (long int) *month / 80L); k = (long int) *month / 11L; *month = (short int) ((long int) *month + 2L - 12L * k); *year = (short int) (100L * (n - 49L) + m + k); return; } /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ /* */ /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ double grv_open (char *grv_name, char *label, int nmax, int mmax, double *coef) { FILE *fp_grv; double c,s, nd, md; int n=999,m=999, l, ind; char string[200], name[20]; if ((fp_grv = fopen (grv_name,"r")) == NULL) { printf ("Cannot open gravity file?\n"); exit (0); } memset (coef,0,(nmax+1)*(nmax+1)); // coef[0] = 1; while (1) { if (fgets (string, 200, fp_grv) == NULL) break; if (strlen(label)==0) { sscanf (string, "%lf%lf%lf%lf", &nd, &md, &c, &s); n = (int)nd; m = (int)md; } else { sscanf (string, "%s", name); if (strcmp (name, label) ==0) { sscanf (string, "%*s%lf%lf%lf%lf", &nd, &md, &c, &s); n = (int)nd; m = (int)md; } } if (n > nmax || m > mmax) continue; else if (m == 0) { coef[n] = c; } else { l = nmax - m + 1; ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1); coef[ind + n - m] = c; coef[ind + n - m + l] = s; } } fclose(fp_grv); return 0; } /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ /* */ /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ double cs2gp_pt (double *llr, double *cs, double gm, 
double a, int nmax, double *gpt, double *pt) { int n, m, k, l, ind; double sinf, cosf, *cosml, *sinml, *aprn, *pbar, lat, lon, r, gpti, t, a2r; /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ lat = llr[0]; lon = llr[1]; r = llr[2]; sinf = sin(lat * DEG2RAD); cosf = cos(lat * DEG2RAD); // #pragma omp parallel private(cosml, sinml, aprn, pbar, pbar1, pbar2, n, m, k, l, ind, sinf, cosf) cosml = (double *) calloc ( nmax + 1, sizeof(double)); //cos(m*lamta) sinml = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta) aprn = (double *) calloc ( nmax + 1, sizeof(double)); //sin(m*lamta) pbar = (double *) calloc ( nmax + 1, sizeof(double)); cosml[0] = 1; sinml[0] = 0; cosml[1] = cos(lon * DEG2RAD); sinml[1] = sin(lon * DEG2RAD); for (m = 2; m <= nmax; m++) { // cosml[m] = cos(m * lon * DEG2RAD); // sinml[m] = sin(m * lon * DEG2RAD); cosml[m] = 2.0 * cosml[1] * cosml[m-1] - cosml[m-2]; sinml[m] = 2.0 * cosml[1] * sinml[m-1] - sinml[m-2]; } aprn[0] = gm / r; a2r = a / r; for (n = 1; n <= nmax; n++) { // aprn[n] = pow (a / r, n) * gm / r; aprn[n] = aprn[n - 1] * a2r; } /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ t = sinf; for (m = 0; m <= nmax; m ++) { l = nmax - m + 1; // lgdr2(t, nmax, m, pbar, pbar1, pbar2); lgdr(t, nmax, m, pbar); for (k = 0; k < l; k++) { if (m==0) { // ind = 0; n = k + m; pt[k] = aprn[n] * pbar[k]; } else { ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1); n = k + m; pt[ind + n - m] = aprn[n] * pbar[k] * cosml[m]; pt[ind + n - m + l] = aprn[n] * pbar[k] * sinml[m]; } } } //! ATPA gpti = 0; for(k = 0; k < (nmax + 1) * (nmax + 1); k++) { gpti = gpti + pt[k] * cs[k]; // printf ("k = %d pt = %e\t cs = %e\t gp = %e \n", k, pt[k], cs[k], pt[k] * cs[k]); // pt[k] = pnmc[k]; } *gpt = gpti; // *gpt = gm/r + gm/r * a/r * a/r * (sqrt(5.0) * (1.5 * t * t - 0.5)) * cs[2]; // printf ("cs[2] = %e\n", cs[2]); free (pbar); free (cosml); free (sinml); free (aprn); return 1; } /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ /*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/ double lgdr(double t, int nmax, int m, double *pbar) /* ! THIS CALCULATES THE FULLY NORMALIZED LEGENDRE FUNCTION WITH GIVEN ORDER(M), ! MAXIMUM DEGREE (NMAX), AND GIVEN EVALUATION POINT, T (COSINES OF COLATITUDE). ! THIS RETURNS ALL Pn,m, P'n,m, AND P''n,m (m=<n<=Nmax). ! THE RECURSION FORMULAR FOR THE FUNCTION ITSELF IS GIVEN IN JEKELI(1996). ! THE RECURSION FORMULAR FOR THE 1ST DERIVATIVE IS GIVEN IN TSCHERNING, ET AL(1983). ! THE FORMULAR FOR THE 2ND DERIVATIVE IS FROM THE ASSOCIATE LEGENDRE EQUATION. ! NOTE : EQUATIONS GIVEN IN TSCHERNING, ET AL(1983) HAVE ERRATA. ! ! S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01) ! */ { int i; //REAL*8 :: PBAR(NMAX-M+1),PBAR1(NMAX-M+1),PBAR2(NMAX-M+1),T,P00,P11,C,D double p00, p11, c, d; //! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION //! 
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
double lgdr(double t, int nmax, int m, double *pbar)
/*
!  THIS CALCULATES THE FULLY NORMALIZED LEGENDRE FUNCTION WITH GIVEN ORDER (M),
!  MAXIMUM DEGREE (NMAX), AND GIVEN EVALUATION POINT, T (COSINE OF COLATITUDE).
!  THIS RETURNS ALL Pn,m (m <= n <= Nmax); THE DERIVATIVES P'n,m AND P''n,m OF
!  THE ORIGINAL ROUTINE (SEE lgdr2) ARE NOT COMPUTED IN THIS VERSION.
!  THE RECURSION FORMULA FOR THE FUNCTION ITSELF IS GIVEN IN JEKELI (1996).
!  THE RECURSION FORMULA FOR THE 1ST DERIVATIVE IS GIVEN IN TSCHERNING ET AL. (1983).
!  THE FORMULA FOR THE 2ND DERIVATIVE IS FROM THE ASSOCIATED LEGENDRE EQUATION.
!  NOTE : EQUATIONS GIVEN IN TSCHERNING ET AL. (1983) HAVE ERRATA.
!
!  S.C. Han, 1/24/01 (MODIFIED FOR CRAY T94 2/13/01)
*/
{
    int i;
//  REAL*8 :: PBAR(NMAX-M+1),PBAR1(NMAX-M+1),PBAR2(NMAX-M+1),T,P00,P11,C,D
    double p00, p11, c, d;

//! THE FULLY NORMALIZED ASSOCIATED LEGENDRE FUNCTION
//! Pm,m : JEKELI (A.3c) & (A.3d), P'm,m : TSCHERNING (7)
    p00 = 1.0;
    p11 = sqrt (3.0 * (1.0 - t*t));

    /* sectoral seed P(m,m) */
    if (m >= 1)
    {
        pbar[0] = p11;
        for (i = 2; i <= m; i++)
        {
            pbar[0] = sqrt((2.0*i + 1.0) / (2.0*i) * (1.0 - t*t)) * pbar[0];
        }
    }
    else
    {
        pbar[0] = p00;
    }

    /* first off-sectoral term P(m+1,m) */
    if (nmax - m + 1 >= 2)
    {
        pbar[1] = sqrt(2.0*m + 3.0) * t * pbar[0];
    }

    /* three-term recursion in degree */
    for (i = 3; i <= nmax - m + 1; i++)
    {
        c = ((2.0*m + 2.0*i - 3.0) * (2.0*m + 2.0*i - 1.0))
          / ((i - 1.0) * (2.0*m + i - 1.0));
        d = ((2.0*m + 2.0*i - 1.0) * (2.0*m + i - 2.0) * (i - 2.0))
          / ((2.0*m + 2.0*i - 5.0) * (i - 1.0) * (2.0*m + i - 1.0));
        pbar[i-1] = sqrt(c) * t * pbar[i-2] - sqrt(d) * pbar[i-3];
    }

    return 0;
}

/* zero out the degree-0 and degree-1 coefficients (C00, C10, C11, S11) */
double zero0zero1 (double *cs, int nmax)
{
    int n, m, l, ind, ic, is;

    cs[0] = 0;
    cs[1] = 0;

    n = 1;
    m = 1;
    l = nmax - m + 1;
    ind = nmax + 1 + (2 * nmax - m + 2) * (m - 1);
    ic = ind + n - m;
    is = ind + n - m + l;
    cs[ic] = 0;
    cs[is] = 0;

    return 0;
}

/* ------------------------------------------------------------------------
    Purpose: some subroutines of matrix and vector operation

    Notes:

    Programmer: Kun Shang @ 4.29.2014

    Functions:

    int brinv (double *a, int n);
    int brank (double *a, int m, int n);
    void choldc (double *a, int n, double p[]);
    void cholsl (double *a, int n, double p[], double b[], double x[]);
    void solvels_chol (double *a, int n, double *y, double *x, int nocov);
    void solvegaus (double *a, int n, double *y, double *x);
    double modvect (double *v);
    double dotvect (double *v1, double *v2);
    void crsvect (double *v1, double *v2, double *v);
    void mt (double *a, int m, int n, double *b);
    void brmul (double *a, double *b, int m, int n, int k, double *c);
------------------------------------------------------------------------ */

/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * brinv - in-place inversion of an n x n matrix (Gauss-Jordan, full pivoting)
 * @a: row-major n x n matrix; overwritten with its inverse
 * @n: dimension
 *
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
int brinv (double *a, int n)
{
    int *is, *js, i, j, k, l, u, v;
    double d, p;

    is = (int *) malloc (n * sizeof(int));
    js = (int *) malloc (n * sizeof(int));
    for (k = 0; k <= n - 1; k++)
    {
        /* find the pivot: largest |a[i][j]| in the remaining submatrix */
        d = 0.0;
        for (i = k; i <= n - 1; i++)
        {
            for (j = k; j <= n - 1; j++)
            {
                l = i * n + j;
                p = fabs (a[l]);
                if (p > d) { d = p; is[k] = i; js[k] = j; }
            }
        }
        if (d + 1.0 == 1.0)
        {
//          rk = brank(a, n, n);
            printf ("warning: Matrix may be ill-conditioned\n");
//          printf ("rank = %d < dimension %d\n", rk, n);
//          free (is);
//          free (js);
//          exit(0);
        }
        /* bring the pivot to the diagonal by row and column swaps */
        if (is[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = is[k] * n + j;
                p = a[u]; a[u] = a[v]; a[v] = p;
            }
        }
        if (js[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + js[k];
                p = a[u]; a[u] = a[v]; a[v] = p;
            }
        }
        l = k * n + k;
        a[l] = 1.0 / a[l];
        for (j = 0; j <= n - 1; j++)
        {
            if (j != k)
            {
                u = k * n + j;
                a[u] = a[u] * a[l];
            }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
                for (j = 0; j <= n - 1; j++)
                    if (j != k)
                    {
                        u = i * n + j;
                        a[u] = a[u] - a[i * n + k] * a[k * n + j];
                    }
        }
        for (i = 0; i <= n - 1; i++)
        {
            if (i != k)
            {
                u = i * n + k;
                a[u] = - a[u] * a[l];
            }
        }
    }
    /* undo the swaps in reverse order */
    for (k = n - 1; k >= 0; k--)
    {
        if (js[k] != k)
        {
            for (j = 0; j <= n - 1; j++)
            {
                u = k * n + j;
                v = js[k] * n + j;
                p = a[u]; a[u] = a[v]; a[v] = p;
            }
        }
        if (is[k] != k)
        {
            for (i = 0; i <= n - 1; i++)
            {
                u = i * n + k;
                v = i * n + is[k];
                p = a[u]; a[u] = a[v]; a[v] = p;
            }
        }
    }
    free(is);
    free(js);
    return(1);
}

/* rank of an m x n matrix by Gaussian elimination with full pivoting */
int brank (double *a, int m, int n)
{
    int i, j, k, nn, is = 0, js = 0, l, ll, u, v;
    double q, d;

    nn = m;
    if (m >= n) nn = n;
    k = 0;
    for (l = 0; l <= nn - 1; l++)
    {
        q = 0.0;
        for (i = l; i <= m - 1; i++)
            for (j = l; j <= n - 1; j++)
            {
                ll = i * n + j;
                d = fabs(a[ll]);
                if (d > q) { q = d; is = i; js = j; }
            }
        if (q + 1.0 == 1.0)
            return(k);
        k = k + 1;
        if (is != l)
        {
            for (j = l; j <= n - 1; j++)
            {
                u = l * n + j;
                v = is * n + j;
                d = a[u]; a[u] = a[v]; a[v] = d;
            }
        }
        if (js != l)
        {
            for (i = l; i <= m - 1; i++)
            {
                u = i * n + js;
                v = i * n + l;
                d = a[u]; a[u] = a[v]; a[v] = d;
            }
        }
        ll = l * n + l;
        for (i = l + 1; i <= n - 1; i++)
        {
            d = a[i * n + l] / a[ll];
            for (j = l + 1; j <= n - 1; j++)
            {
                u = i * n + j;
                a[u] = a[u] - d * a[l * n + j];
            }
        }
    }
    return(k);
}

void choldc(double *a, int n, double p[])
/*
 * Given a positive-definite symmetric matrix a (row-major n x n, 0-based,
 * unlike the 1-based Numerical Recipes original), this routine constructs
 * its Cholesky decomposition, A = L . L^T.
 * On input, only the upper triangle of a need be given; it is not modified.
 * The Cholesky factor L is returned in the lower triangle of a,
 * except for its diagonal elements, which are returned in p[0..n-1].
 */
{
    int i, j, k, rk;
    double sum;

    for (i = 0; i < n; i++)
    {
        for (j = i; j < n; j++)
        {
            sum = a[i * n + j];
            for (k = i - 1; k >= 0; k--)
                sum -= a[i * n + k] * a[j * n + k];
            if (i == j)
            {
                if (sum <= 0.0)
                {
                    rk = brank(a, n, n);
                    printf("error: Matrix is not positive definite:\n");
                    printf("       i = %d\tj = %d\tsum = %e\n", i, j, sum);
                    printf("       rank = %d\n", rk);
                    exit(0);
                }
                p[i] = sqrt(sum);
            }
            else
                a[j * n + i] = sum / p[i];
        }
    }
}

void cholsl(double *a, int n, double p[], double b[], double x[])
/*
 * Solves the set of n linear equations A . x = b,
 * where a is a positive-definite symmetric matrix (row-major n x n, 0-based).
 * a and p[0..n-1] are input as the output of the routine choldc.
 * Only the lower subdiagonal portion of a is accessed.
 * b[0..n-1] is input as the right-hand side vector.
 * The solution vector is returned in x[0..n-1].
 * a, n, and p are not modified and can be left in place for
 * successive calls with different right-hand sides b.
 * b is not modified unless you identify b and x in the calling sequence,
 * which is allowed.
 */
{
    int i, k;
    double sum;

    for (i = 0; i < n; i++)            /* solve L . y = b, storing y in x */
    {
        for (sum = b[i], k = i - 1; k >= 0; k--)
            sum -= a[i * n + k] * x[k];
        x[i] = sum / p[i];
    }
    for (i = n - 1; i >= 0; i--)       /* solve L^T . x = y */
    {
        for (sum = x[i], k = i + 1; k < n; k++)
            sum -= a[k * n + i] * x[k];
        x[i] = sum / p[i];
    }
}

///////////********************////////////////

/* solve a . x = y by Cholesky; if nocov == 0, also overwrite a with a^{-1} */
void solvels_chol(double *a, int n, double *y, double *x, int nocov)
{
    double *p, sum;
    int i, k, j;

    p = (double *) calloc (n, sizeof(double));
    choldc(a, n, p);
    cholsl(a, n, p, y, x);

    if (nocov == 1)
    {
        free (p);
        return;
    }

    /* invert L in place (lower triangle) */
    for (i = 0; i < n; i++)
    {
        a[i * n + i] = 1.0 / p[i];
        for (j = i + 1; j < n; j++)
        {
            sum = 0.0;
            for (k = i; k < j; k++)
                sum -= a[j * n + k] * a[k * n + i];
            a[j * n + i] = sum / p[j];
        }
    }
    /* a^{-1} = L^{-T} . L^{-1}, upper triangle first */
    for (i = 0; i <= n - 1; i++)
    {
        for (j = i; j <= n - 1; j++)
        {
            sum = 0.0;
            for (k = j; k <= n - 1; k++)
                sum = sum + a[k * n + i] * a[k * n + j];
            a[i * n + j] = sum;
        }
    }
    /* mirror to the lower triangle */
    for (i = 0; i <= n - 1; i++)
    {
        for (j = 0; j <= i - 1; j++)
        {
            a[i * n + j] = a[j * n + i];
        }
    }
    free(p);
    return;
}

/* solve a . x = y by explicit inversion: x = a^{-1} . y */
void solvegaus(double *a, int n, double *y, double *x)
{
    brinv(a, n);
    brmul(a, y, n, n, 1, x);
}

double modvect (double *v)
{
    return sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
}

double dotvect (double *v1, double *v2)
{
    return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2];
}

void crsvect (double *v1, double *v2, double *v)
{
    v[0] = v1[1] * v2[2] - v1[2] * v2[1];
    v[1] = v1[2] * v2[0] - v1[0] * v2[2];
    v[2] = v1[0] * v2[1] - v1[1] * v2[0];
}

/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * mt - matrix transpose
 * @a: input m x n matrix (row-major)
 * @b: output n x m matrix, b = a^T
 *
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void mt (double *a, int m, int n, double *b)
{
    int i, j;

    for (i = 0; i <= m - 1; i++)
    {
        for (j = 0; j <= n - 1; j++)
            b[j * m + i] = a[i * n + j];
    }
    return;
}
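/*
 * A minimal usage sketch for choldc()/cholsl() above (demo_cholesky is a
 * hypothetical test driver, not part of the original sources): factor a
 * small symmetric positive-definite system and solve A . x = b. Note that,
 * unlike the Numerical Recipes original, these routines take 0-based,
 * row-major flattened arrays.
 */
static void demo_cholesky (void)
{
    double a[9] = { 4.0, 2.0, 0.0,
                    2.0, 3.0, 1.0,
                    0.0, 1.0, 2.0 };   /* SPD 3 x 3, row-major */
    double b[3] = { 2.0, 3.0, 4.0 };
    double p[3], x[3];

    choldc(a, 3, p);        /* L in lower triangle of a, diagonal in p */
    cholsl(a, 3, p, b, x);  /* forward + back substitution */
    printf ("x = %f %f %f\n", x[0], x[1], x[2]);
}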
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
/*
 * brmul - matrix multiplication, c = a . b
 * @a: m x n matrix (row-major)
 * @b: n x k matrix
 * @c: output m x k matrix
 *
 * version: 20 Aug 2010
 */
/*--+----1----+----2----+----3----+----4----+----5----+----6----+----7----+--*/
void brmul (double *a, double *b, int m, int n, int k, double *c)
{
    int i, j, l, u;

    for (i = 0; i <= m - 1; i++)
    {
        for (j = 0; j <= k - 1; j++)
        {
            u = i * k + j;
            c[u] = 0.0;
            for (l = 0; l <= n - 1; l++)
                c[u] = c[u] + a[i * n + l] * b[l * k + j];
        }
    }
    return;
}
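/*
 * A minimal end-to-end sketch tying the routines above together
 * (demo_normal_equations is a hypothetical driver, not in the original
 * sources): form the normal equations N = A^T A and y = A^T b with
 * mt()/brmul(), then solve with solvels_chol(). This mirrors the A^T P A
 * accumulation used by the gravity-field code earlier in this file
 * (here with unit weights, fitting y = x0 + x1 * t to four samples).
 */
static void demo_normal_equations (void)
{
    /* A is 4 x 2 (4 observations, 2 parameters), b the observation vector */
    double A[8] = { 1.0, 1.0,
                    1.0, 2.0,
                    1.0, 3.0,
                    1.0, 4.0 };
    double b[4] = { 2.1, 3.9, 6.2, 8.1 };
    double At[8], N[4], y[2], x[2];

    mt(A, 4, 2, At);               /* At = A^T   (2 x 4) */
    brmul(At, A, 2, 4, 2, N);      /* N  = A^T A (2 x 2) */
    brmul(At, b, 2, 4, 1, y);      /* y  = A^T b (2 x 1) */
    solvels_chol(N, 2, y, x, 1);   /* x = N^{-1} y; nocov = 1 skips covariance */
    printf ("intercept = %f  slope = %f\n", x[0], x[1]);
}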
lu.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - LU

  This benchmark is an OpenMP C version of the NPB LU code.

  The OpenMP C versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/

/*--------------------------------------------------------------------

  Authors: S. Weeratunga
           V. Venkatakrishnan
           E. Barszcz
           M. Yarrow

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"

/* global variables */
#include "applu.h"

#if defined(_OPENMP)
/* for thread synchronization */
static boolean flag[ISIZ1/2*2+1];
#endif /* _OPENMP */

/* function declarations */
static void blts (int nx, int ny, int nz, int k,
                  double omega,
                  double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
                  double ldz[ISIZ1][ISIZ2][5][5],
                  double ldy[ISIZ1][ISIZ2][5][5],
                  double ldx[ISIZ1][ISIZ2][5][5],
                  double d[ISIZ1][ISIZ2][5][5],
                  int ist, int iend, int jst, int jend,
                  int nx0, int ny0 );
static void buts(int nx, int ny, int nz, int k,
                 double omega,
                 double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
                 double tv[ISIZ1][ISIZ2][5],
                 double d[ISIZ1][ISIZ2][5][5],
                 double udx[ISIZ1][ISIZ2][5][5],
                 double udy[ISIZ1][ISIZ2][5][5],
                 double udz[ISIZ1][ISIZ2][5][5],
                 int ist, int iend, int jst, int jend,
                 int nx0, int ny0 );
static void domain(void);
static void erhs(void);
static void error(void);
static void exact( int i, int j, int k, double u000ijk[5] );
static void jacld(int k);
static void jacu(int k);
static void l2norm (int nx0, int ny0, int nz0,
                    int ist, int iend, int jst, int jend,
                    double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5],
                    double sum[5]);
static void pintgr(void);
static void read_input(void);
static void rhs(void);
static void setbv(void);
static void setcoeff(void);
static void setiv(void);
static void ssor(void);
static void verify(double xcr[5], double xce[5], double xci,
                   char *class, boolean *verified);

/*--------------------------------------------------------------------
      program applu
--------------------------------------------------------------------*/
int main(int argc, char **argv)
{
/*--------------------------------------------------------------------
c
c   driver for the performance evaluation of the solver for
c   five coupled parabolic/elliptic partial differential equations.
c
--------------------------------------------------------------------*/

  char class;
  boolean verified;
  double mflops;
  int nthreads = 1;

/*--------------------------------------------------------------------
c   read input data
--------------------------------------------------------------------*/
  read_input();

/*--------------------------------------------------------------------
c   set up domain sizes
--------------------------------------------------------------------*/
  domain();

/*--------------------------------------------------------------------
c   set up coefficients
--------------------------------------------------------------------*/
  setcoeff();

/*--------------------------------------------------------------------
c   set the boundary values for dependent variables
--------------------------------------------------------------------*/
  setbv();

/*--------------------------------------------------------------------
c   set the initial values for dependent variables
--------------------------------------------------------------------*/
  setiv();

/*--------------------------------------------------------------------
c   compute the forcing term based on prescribed exact solution
--------------------------------------------------------------------*/
  erhs();

/* omp_get_num_threads() reports the team size only when called from
   inside a parallel region; outside one it always returns 1, so the
   query must be wrapped in a parallel region as in the original NPB
   sources */
#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  }

/*--------------------------------------------------------------------
c   perform the SSOR iterations
--------------------------------------------------------------------*/
  ssor();

/*--------------------------------------------------------------------
c   compute the solution error
--------------------------------------------------------------------*/
  error();

/*--------------------------------------------------------------------
c   compute the surface integral
--------------------------------------------------------------------*/
  pintgr();

/*--------------------------------------------------------------------
c   verification test
--------------------------------------------------------------------*/
  verify ( rsdnm, errnm, frc, &class, &verified );

  mflops = (double)itmax * (1984.77 * (double)nx0
                            * (double)ny0
                            * (double)nz0
                            - 10923.3 * pow2((double)(nx0 + ny0 + nz0) / 3.0)
                            + 27770.9 * (double)(nx0 + ny0 + nz0) / 3.0
                            - 144010.0)
           / (maxtime * 1000000.0);

  c_print_results("LU", class, nx0, ny0, nz0, itmax, nthreads,
                  maxtime, mflops, " floating point", verified,
                  NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6,
                  "(none)");
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void blts (int nx, int ny, int nz, int k,
                  double omega,
/*--------------------------------------------------------------------
c   To improve cache performance, second two dimensions padded by 1
c   for even number sizes only.  Only needed in v.
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double ldz[ISIZ1][ISIZ2][5][5], double ldy[ISIZ1][ISIZ2][5][5], double ldx[ISIZ1][ISIZ2][5][5], double d[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block lower triangular solution: c c v <-- ( L-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,m ,ldz ,k ,v ,omega ,jst ,jend ,i ) for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldz[i][j][m][0] * v[i][j][k-1][0] + ldz[i][j][m][1] * v[i][j][k-1][1] + ldz[i][j][m][2] * v[i][j][k-1][2] + ldz[i][j][m][3] * v[i][j][k-1][3] + ldz[i][j][m][4] * v[i][j][k-1][4] ); } } } #pragma omp for for (i = ist; i <= iend; i++) { #if defined(_OPENMP) if (i != ist) { while (flag[i-1] == 0) { ; } } if (i != iend) { while (flag[i] == 1) { ; } } #endif /* _OPENMP */ for (j = jst; j <= jend; j++) { for (m = 0; m < 5; m++) { v[i][j][k][m] = v[i][j][k][m] - omega * ( ldy[i][j][m][0] * v[i][j-1][k][0] + ldx[i][j][m][0] * v[i-1][j][k][0] + ldy[i][j][m][1] * v[i][j-1][k][1] + ldx[i][j][m][1] * v[i-1][j][k][1] + ldy[i][j][m][2] * v[i][j-1][k][2] + ldx[i][j][m][2] * v[i-1][j][k][2] + ldy[i][j][m][3] * v[i][j-1][k][3] + ldx[i][j][m][3] * v[i-1][j][k][3] + ldy[i][j][m][4] * v[i][j-1][k][4] + ldx[i][j][m][4] * v[i-1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion c c forward elimination --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; v[i][j][k][1] = v[i][j][k][1] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][0] * tmp; tmp1 = 1.0 / tmat[ 1][1]; tmp = tmp1 * tmat[ 2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; v[i][j][k][2] = v[i][j][k][2] - v[i][j][k][1] * tmp; tmp = tmp1 * 
tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; v[i][j][k][3] = v[i][j][k][3] - v[i][j][k][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; v[i][j][k][4] = v[i][j][k][4] - v[i][j][k][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ v[i][j][k][4] = v[i][j][k][4] / tmat[4][4]; v[i][j][k][3] = v[i][j][k][3] - tmat[3][4] * v[i][j][k][4]; v[i][j][k][3] = v[i][j][k][3] / tmat[3][3]; v[i][j][k][2] = v[i][j][k][2] - tmat[2][3] * v[i][j][k][3] - tmat[2][4] * v[i][j][k][4]; v[i][j][k][2] = v[i][j][k][2] / tmat[2][2]; v[i][j][k][1] = v[i][j][k][1] - tmat[1][2] * v[i][j][k][2] - tmat[1][3] * v[i][j][k][3] - tmat[1][4] * v[i][j][k][4]; v[i][j][k][1] = v[i][j][k][1] / tmat[1][1]; v[i][j][k][0] = v[i][j][k][0] - tmat[0][1] * v[i][j][k][1] - tmat[0][2] * v[i][j][k][2] - tmat[0][3] * v[i][j][k][3] - tmat[0][4] * v[i][j][k][4]; v[i][j][k][0] = v[i][j][k][0] / tmat[0][0]; } #if defined(_OPENMP) if (i != ist) flag[i-1] = 0; if (i != iend) flag[i] = 1; #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void buts(int nx, int ny, int nz, int k, double omega, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. 
--------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double tv[ISIZ1][ISIZ2][5], double d[ISIZ1][ISIZ2][5][5], double udx[ISIZ1][ISIZ2][5][5], double udy[ISIZ1][ISIZ2][5][5], double udz[ISIZ1][ISIZ2][5][5], int ist, int iend, int jst, int jend, int nx0, int ny0 ) { /*-------------------------------------------------------------------- c c compute the regular-sparse, block upper triangular solution: c c v <-- ( U-inv ) * v c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, m; double tmp, tmp1; double tmat[5][5]; #pragma omp for for (i = iend; i >= ist; i--) { #pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) for (j = jend; j >= jst; j--) { #pragma omp parallel for firstprivate(ist ,iend ,j ,m ,udz ,k ,v ,omega ,tv ,jst ,jend ,i ) for (m = 0; m < 5; m++) { tv[i][j][m] = omega * ( udz[i][j][m][0] * v[i][j][k+1][0] + udz[i][j][m][1] * v[i][j][k+1][1] + udz[i][j][m][2] * v[i][j][k+1][2] + udz[i][j][m][3] * v[i][j][k+1][3] + udz[i][j][m][4] * v[i][j][k+1][4] ); } } } #pragma omp for for (i = iend; i >= ist; i--) { #if defined(_OPENMP) if (i != iend) { while (flag[i+1] == 0) { ; } } if (i != ist) { while (flag[i] == 1) { ; } } #endif /* _OPENMP */ for (j = jend; j >= jst; j--) { for (m = 0; m < 5; m++) { tv[i][j][m] = tv[i][j][m] + omega * ( udy[i][j][m][0] * v[i][j+1][k][0] + udx[i][j][m][0] * v[i+1][j][k][0] + udy[i][j][m][1] * v[i][j+1][k][1] + udx[i][j][m][1] * v[i+1][j][k][1] + udy[i][j][m][2] * v[i][j+1][k][2] + udx[i][j][m][2] * v[i+1][j][k][2] + udy[i][j][m][3] * v[i][j+1][k][3] + udx[i][j][m][3] * v[i+1][j][k][3] + udy[i][j][m][4] * v[i][j+1][k][4] + udx[i][j][m][4] * v[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c diagonal block inversion --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { tmat[m][0] = d[i][j][m][0]; tmat[m][1] = d[i][j][m][1]; tmat[m][2] = d[i][j][m][2]; tmat[m][3] = d[i][j][m][3]; tmat[m][4] = d[i][j][m][4]; } tmp1 = 1.0 / tmat[0][0]; tmp = tmp1 * tmat[1][0]; tmat[1][1] = tmat[1][1] - tmp * tmat[0][1]; tmat[1][2] = tmat[1][2] - tmp * tmat[0][2]; tmat[1][3] = tmat[1][3] - tmp * tmat[0][3]; tmat[1][4] = tmat[1][4] - tmp * tmat[0][4]; tv[i][j][1] = tv[i][j][1] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[2][0]; tmat[2][1] = tmat[2][1] - tmp * tmat[0][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[0][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[0][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[0][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[3][0]; tmat[3][1] = tmat[3][1] - tmp * tmat[0][1]; tmat[3][2] = tmat[3][2] - tmp * tmat[0][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[0][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[0][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][0] * tmp; tmp = tmp1 * tmat[4][0]; tmat[4][1] = tmat[4][1] - tmp * tmat[0][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[0][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[0][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[0][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][0] * tmp; tmp1 = 1.0 / tmat[1][1]; tmp = tmp1 * tmat[2][1]; tmat[2][2] = tmat[2][2] - tmp * tmat[1][2]; tmat[2][3] = tmat[2][3] - tmp * tmat[1][3]; tmat[2][4] = tmat[2][4] - tmp * tmat[1][4]; tv[i][j][2] = tv[i][j][2] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[3][1]; tmat[3][2] = tmat[3][2] - tmp 
* tmat[1][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[1][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[1][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][1] * tmp; tmp = tmp1 * tmat[4][1]; tmat[4][2] = tmat[4][2] - tmp * tmat[1][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[1][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[1][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][1] * tmp; tmp1 = 1.0 / tmat[2][2]; tmp = tmp1 * tmat[3][2]; tmat[3][3] = tmat[3][3] - tmp * tmat[2][3]; tmat[3][4] = tmat[3][4] - tmp * tmat[2][4]; tv[i][j][3] = tv[i][j][3] - tv[i][j][2] * tmp; tmp = tmp1 * tmat[4][2]; tmat[4][3] = tmat[4][3] - tmp * tmat[2][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[2][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][2] * tmp; tmp1 = 1.0 / tmat[3][3]; tmp = tmp1 * tmat[4][3]; tmat[4][4] = tmat[4][4] - tmp * tmat[3][4]; tv[i][j][4] = tv[i][j][4] - tv[i][j][3] * tmp; /*-------------------------------------------------------------------- c back substitution --------------------------------------------------------------------*/ tv[i][j][4] = tv[i][j][4] / tmat[4][4]; tv[i][j][3] = tv[i][j][3] - tmat[3][4] * tv[i][j][4]; tv[i][j][3] = tv[i][j][3] / tmat[3][3]; tv[i][j][2] = tv[i][j][2] - tmat[2][3] * tv[i][j][3] - tmat[2][4] * tv[i][j][4]; tv[i][j][2] = tv[i][j][2] / tmat[2][2]; tv[i][j][1] = tv[i][j][1] - tmat[1][2] * tv[i][j][2] - tmat[1][3] * tv[i][j][3] - tmat[1][4] * tv[i][j][4]; tv[i][j][1] = tv[i][j][1] / tmat[1][1]; tv[i][j][0] = tv[i][j][0] - tmat[0][1] * tv[i][j][1] - tmat[0][2] * tv[i][j][2] - tmat[0][3] * tv[i][j][3] - tmat[0][4] * tv[i][j][4]; tv[i][j][0] = tv[i][j][0] / tmat[0][0]; v[i][j][k][0] = v[i][j][k][0] - tv[i][j][0]; v[i][j][k][1] = v[i][j][k][1] - tv[i][j][1]; v[i][j][k][2] = v[i][j][k][2] - tv[i][j][2]; v[i][j][k][3] = v[i][j][k][3] - tv[i][j][3]; v[i][j][k][4] = v[i][j][k][4] - tv[i][j][4]; } #if defined(_OPENMP) if (i != iend) flag[i+1] = 0; if (i != ist) flag[i] = 1; #endif /* _OPENMP */ } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void domain(void) { /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ nx = nx0; ny = ny0; nz = nz0; /*-------------------------------------------------------------------- c check the sub-domain size --------------------------------------------------------------------*/ if ( nx < 4 || ny < 4 || nz < 4 ) { printf(" SUBDOMAIN SIZE IS TOO SMALL - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE GREATER THAN OR EQUAL\n" " TO 4 THEY ARE CURRENTLY%3d%3d%3d\n", nx, ny, nz); exit(1); } if ( nx > ISIZ1 || ny > ISIZ2 || nz > ISIZ3 ) { printf(" SUBDOMAIN SIZE IS TOO LARGE - \n" " ADJUST PROBLEM SIZE OR NUMBER OF PROCESSORS\n" " SO THAT NX, NY AND NZ ARE LESS THAN OR EQUAL TO \n" " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY. 
THEY ARE\n" " CURRENTLY%4d%4d%4d\n", nx, ny, nz); exit(1); } /*-------------------------------------------------------------------- c set up the start and end in i and j extents for all processors --------------------------------------------------------------------*/ ist = 1; iend = nx - 2; jst = 1; jend = ny - 2; } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void erhs(void) { { /*-------------------------------------------------------------------- c c compute the right hand side based on exact solution c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k, m; int iglob, jglob; int L1, L2; int ist1, iend1; int jst1, jend1; double dsspm; double xi, eta, zeta; double q; double u21, u31, u41; double tmp; double u21i, u31i, u41i, u51i; double u21j, u31j, u41j, u51j; double u21k, u31k, u41k, u51k; double u21im1, u31im1, u41im1, u51im1; double u21jm1, u31jm1, u41jm1, u51jm1; double u21km1, u31km1, u41km1, u51km1; dsspm = dssp; #pragma omp parallel for for (i = 0; i < nx; i++) { #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) for (j = 0; j < ny; j++) { #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) for (k = 0; k < nz; k++) { #pragma omp parallel for firstprivate(nx ,m ,k ,j ,nz ,ny ,i ) for (m = 0; m < 5; m++) { frct[i][j][k][m] = 0.0; } } } } #pragma omp parallel for for (i = 0; i < nx; i++) { iglob = i; xi = ( (double)(iglob) ) / ( nx0 - 1 ); #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) for (j = 0; j < ny; j++) { jglob = j; eta = ( (double)(jglob) ) / ( ny0 - 1 ); #pragma omp parallel for firstprivate(nx ,m ,k ,j ,xi ,eta ,zeta ,nx0 ,ny0 ,nz ,ny ,i ) for (k = 0; k < nz; k++) { zeta = ( (double)(k) ) / ( nz - 1 ); for (m = 0; m < 5; m++) { rsd[i][j][k][m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } } } /*-------------------------------------------------------------------- c xi-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = nx-1; #pragma omp parallel for for (i = L1; i <= L2; i++) { #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(L2 ,nx ,j ,k ,u21 ,q ,nz ,jst ,jend ,i ) for (k = 1; k < nz - 1; k++) { flux[i][j][k][0] = rsd[i][j][k][1]; u21 = rsd[i][j][k][1] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u21 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][2] = rsd[i][j][k][2] * u21; flux[i][j][k][3] = rsd[i][j][k][3] * u21; flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u21; } } } #pragma omp parallel for for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) for (k 
= 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] ); } } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) for (i = ist; i <= L2; i++) { tmp = 1.0 / rsd[i][j][k][0]; u21i = tmp * rsd[i][j][k][1]; u31i = tmp * rsd[i][j][k][2]; u41i = tmp * rsd[i][j][k][3]; u51i = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i-1][j][k][0]; u21im1 = tmp * rsd[i-1][j][k][1]; u31im1 = tmp * rsd[i-1][j][k][2]; u41im1 = tmp * rsd[i-1][j][k][3]; u51im1 = tmp * rsd[i-1][j][k][4]; flux[i][j][k][1] = (4.0/3.0) * tx3 * ( u21i - u21im1 ); flux[i][j][k][2] = tx3 * ( u31i - u31im1 ); flux[i][j][k][3] = tx3 * ( u41i - u41im1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tx3 * ( ( u21i * u21i + u31i * u31i + u41i * u41i ) - ( u21im1*u21im1 + u31im1*u31im1 + u41im1*u41im1 ) ) + (1.0/6.0) * tx3 * ( u21i*u21i - u21im1*u21im1 ) + C1 * C5 * tx3 * ( u51i - u51im1 ); } #pragma omp parallel for firstprivate(jend ,m ,i ,jst ,k ,u21i ,u31i ,u41i ,u51i ,tmp ,u21im1 ,u31im1 ,u41im1 ,u51im1 ,tx2 ,ist ,iend ,tx3 ,L2 ,tx1 ,dx1 ,dx2 ,dx3 ,dx4 ,dx5 ,dssp ,iend1 ,nx ,nz ,j ) for (i = ist; i <= iend; i++) { frct[i][j][k][0] = frct[i][j][k][0] + dx1 * tx1 * ( rsd[i-1][j][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i+1][j][k][0] ); frct[i][j][k][1] = frct[i][j][k][1] + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] ) + dx2 * tx1 * ( rsd[i-1][j][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i+1][j][k][1] ); frct[i][j][k][2] = frct[i][j][k][2] + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] ) + dx3 * tx1 * ( rsd[i-1][j][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i+1][j][k][2] ); frct[i][j][k][3] = frct[i][j][k][3] + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] ) + dx4 * tx1 * ( rsd[i-1][j][k][3] - 2.0 * rsd[i][j][k][3] + rsd[i+1][j][k][3] ); frct[i][j][k][4] = frct[i][j][k][4] + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] ) + dx5 * tx1 * ( rsd[i-1][j][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i+1][j][k][4] ); } /*-------------------------------------------------------------------- c Fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[1][j][k][m] = frct[1][j][k][m] - dsspm * ( + 5.0 * rsd[1][j][k][m] - 4.0 * rsd[2][j][k][m] + rsd[3][j][k][m] ); frct[2][j][k][m] = frct[2][j][k][m] - dsspm * ( - 4.0 * rsd[1][j][k][m] + 6.0 * rsd[2][j][k][m] - 4.0 * rsd[3][j][k][m] + rsd[4][j][k][m] ); } ist1 = 3; iend1 = nx - 4; for (i = ist1; i <=iend1; i++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i-2][j][k][m] - 4.0 * rsd[i-1][j][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i+1][j][k][m] + rsd[i+2][j][k][m] ); } } for (m = 0; m < 5; m++) { frct[nx-3][j][k][m] = frct[nx-3][j][k][m] - dsspm * ( rsd[nx-5][j][k][m] - 4.0 * rsd[nx-4][j][k][m] + 6.0 * rsd[nx-3][j][k][m] - 4.0 * rsd[nx-2][j][k][m] ); frct[nx-2][j][k][m] = frct[nx-2][j][k][m] - dsspm * ( rsd[nx-4][j][k][m] - 4.0 * rsd[nx-3][j][k][m] + 
5.0 * rsd[nx-2][j][k][m] ); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny-1; #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) for (j = L1; j <= L2; j++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) for (k = 1; k <= nz - 2; k++) { flux[i][j][k][0] = rsd[i][j][k][2]; u31 = rsd[i][j][k][2] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u31; flux[i][j][k][2] = rsd[i][j][k][2] * u31 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][3] = rsd[i][j][k][3] * u31; flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u31; } } } #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] ); } } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) for (j = jst; j <= L2; j++) { tmp = 1.0 / rsd[i][j][k][0]; u21j = tmp * rsd[i][j][k][1]; u31j = tmp * rsd[i][j][k][2]; u41j = tmp * rsd[i][j][k][3]; u51j = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j-1][k][0]; u21jm1 = tmp * rsd[i][j-1][k][1]; u31jm1 = tmp * rsd[i][j-1][k][2]; u41jm1 = tmp * rsd[i][j-1][k][3]; u51jm1 = tmp * rsd[i][j-1][k][4]; flux[i][j][k][1] = ty3 * ( u21j - u21jm1 ); flux[i][j][k][2] = (4.0/3.0) * ty3 * ( u31j - u31jm1 ); flux[i][j][k][3] = ty3 * ( u41j - u41jm1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * ty3 * ( ( u21j *u21j + u31j *u31j + u41j *u41j ) - ( u21jm1*u21jm1 + u31jm1*u31jm1 + u41jm1*u41jm1 ) ) + (1.0/6.0) * ty3 * ( u31j*u31j - u31jm1*u31jm1 ) + C1 * C5 * ty3 * ( u51j - u51jm1 ); } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,jend1 ,ny ,nz ,i ) for (j = jst; j <= jend; j++) { frct[i][j][k][0] = frct[i][j][k][0] + dy1 * ty1 * ( rsd[i][j-1][k][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j+1][k][0] ); frct[i][j][k][1] = frct[i][j][k][1] + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] ) + dy2 * ty1 * ( rsd[i][j-1][k][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j+1][k][1] ); frct[i][j][k][2] = frct[i][j][k][2] + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] ) + dy3 * ty1 * ( rsd[i][j-1][k][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j+1][k][2] ); frct[i][j][k][3] = frct[i][j][k][3] + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] ) + dy4 * ty1 * ( rsd[i][j-1][k][3] - 2.0 * 
rsd[i][j][k][3] + rsd[i][j+1][k][3] ); frct[i][j][k][4] = frct[i][j][k][4] + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] ) + dy5 * ty1 * ( rsd[i][j-1][k][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j+1][k][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][1][k][m] = frct[i][1][k][m] - dsspm * ( + 5.0 * rsd[i][1][k][m] - 4.0 * rsd[i][2][k][m] + rsd[i][3][k][m] ); frct[i][2][k][m] = frct[i][2][k][m] - dsspm * ( - 4.0 * rsd[i][1][k][m] + 6.0 * rsd[i][2][k][m] - 4.0 * rsd[i][3][k][m] + rsd[i][4][k][m] ); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j++) { for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j-2][k][m] - 4.0 * rsd[i][j-1][k][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j+1][k][m] + rsd[i][j+2][k][m] ); } } for (m = 0; m < 5; m++) { frct[i][ny-3][k][m] = frct[i][ny-3][k][m] - dsspm * ( rsd[i][ny-5][k][m] - 4.0 * rsd[i][ny-4][k][m] + 6.0 * rsd[i][ny-3][k][m] - 4.0 * rsd[i][ny-2][k][m] ); frct[i][ny-2][k][m] = frct[i][ny-2][k][m] - dsspm * ( rsd[i][ny-4][k][m] - 4.0 * rsd[i][ny-3][k][m] + 5.0 * rsd[i][ny-2][k][m] ); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) for (k = 0; k <= nz-1; k++) { flux[i][j][k][0] = rsd[i][j][k][3]; u41 = rsd[i][j][k][3] / rsd[i][j][k][0]; q = 0.50 * ( rsd[i][j][k][1] * rsd[i][j][k][1] + rsd[i][j][k][2] * rsd[i][j][k][2] + rsd[i][j][k][3] * rsd[i][j][k][3] ) / rsd[i][j][k][0]; flux[i][j][k][1] = rsd[i][j][k][1] * u41; flux[i][j][k][2] = rsd[i][j][k][2] * u41; flux[i][j][k][3] = rsd[i][j][k][3] * u41 + C2 * ( rsd[i][j][k][4] - q ); flux[i][j][k][4] = ( C1 * rsd[i][j][k][4] - C2 * q ) * u41; } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] ); } } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) for (k = 1; k <= nz-1; k++) { tmp = 1.0 / rsd[i][j][k][0]; u21k = tmp * rsd[i][j][k][1]; u31k = tmp * rsd[i][j][k][2]; u41k = tmp * rsd[i][j][k][3]; u51k = tmp * rsd[i][j][k][4]; tmp = 1.0 / rsd[i][j][k-1][0]; u21km1 = tmp * rsd[i][j][k-1][1]; u31km1 = tmp * rsd[i][j][k-1][2]; u41km1 = tmp * rsd[i][j][k-1][3]; u51km1 = tmp * rsd[i][j][k-1][4]; flux[i][j][k][1] = tz3 * ( u21k - u21km1 ); flux[i][j][k][2] = tz3 * ( u31k - u31km1 ); flux[i][j][k][3] = (4.0/3.0) * tz3 * ( u41k - u41km1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tz3 * ( ( u21k *u21k + u31k *u31k + u41k *u41k ) - ( u21km1*u21km1 + u31km1*u31km1 + u41km1*u41km1 ) ) + (1.0/6.0) * tz3 * ( u41k*u41k - u41km1*u41km1 ) + C1 * C5 * tz3 * ( u51k - u51km1 ); } #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) for (k = 1; k <= nz - 2; k++) { frct[i][j][k][0] = frct[i][j][k][0] + dz1 * tz1 * ( rsd[i][j][k+1][0] - 2.0 * rsd[i][j][k][0] + rsd[i][j][k-1][0] ); frct[i][j][k][1] = frct[i][j][k][1] + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] ) + dz2 * tz1 * ( 
rsd[i][j][k+1][1] - 2.0 * rsd[i][j][k][1] + rsd[i][j][k-1][1] ); frct[i][j][k][2] = frct[i][j][k][2] + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] ) + dz3 * tz1 * ( rsd[i][j][k+1][2] - 2.0 * rsd[i][j][k][2] + rsd[i][j][k-1][2] ); frct[i][j][k][3] = frct[i][j][k][3] + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] ) + dz4 * tz1 * ( rsd[i][j][k+1][3] - 2.0 * rsd[i][j][k][3] + rsd[i][j][k-1][3] ); frct[i][j][k][4] = frct[i][j][k][4] + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] ) + dz5 * tz1 * ( rsd[i][j][k+1][4] - 2.0 * rsd[i][j][k][4] + rsd[i][j][k-1][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { frct[i][j][1][m] = frct[i][j][1][m] - dsspm * ( + 5.0 * rsd[i][j][1][m] - 4.0 * rsd[i][j][2][m] + rsd[i][j][3][m] ); frct[i][j][2][m] = frct[i][j][2][m] - dsspm * (- 4.0 * rsd[i][j][1][m] + 6.0 * rsd[i][j][2][m] - 4.0 * rsd[i][j][3][m] + rsd[i][j][4][m] ); } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) for (k = 3; k <= nz - 4; k++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) for (m = 0; m < 5; m++) { frct[i][j][k][m] = frct[i][j][k][m] - dsspm * ( rsd[i][j][k-2][m] - 4.0 * rsd[i][j][k-1][m] + 6.0 * rsd[i][j][k][m] - 4.0 * rsd[i][j][k+1][m] + rsd[i][j][k+2][m] ); } } for (m = 0; m < 5; m++) { frct[i][j][nz-3][m] = frct[i][j][nz-3][m] - dsspm * ( rsd[i][j][nz-5][m] - 4.0 * rsd[i][j][nz-4][m] + 6.0 * rsd[i][j][nz-3][m] - 4.0 * rsd[i][j][nz-2][m] ); frct[i][j][nz-2][m] = frct[i][j][nz-2][m] - dsspm * ( rsd[i][j][nz-4][m] - 4.0 * rsd[i][j][nz-3][m] + 5.0 * rsd[i][j][nz-2][m] ); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void error(void) { /*-------------------------------------------------------------------- c c compute the solution error c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k, m; int iglob, jglob; double tmp; double u000ijk[5]; #pragma omp parallel for firstprivate(m ) for (m = 0; m < 5; m++) { errnm[m] = 0.0; } for (i = ist; i <= iend; i++) { iglob = i; for (j = jst; j <= jend; j++) { jglob = j; for (k = 1; k <= nz-2; k++) { exact( iglob, jglob, k, u000ijk ); #pragma omp parallel for firstprivate(ist ,tmp ,m ,jst ,k ,j ,i ) for (m = 0; m < 5; m++) { tmp = ( u000ijk[m] - u[i][j][k][m] ); errnm[m] = errnm[m] + tmp *tmp; } } } } #pragma omp parallel for firstprivate(nz0 ,ny0 ,nx0 ,m ) for (m = 0; m < 5; m++) { errnm[m] = sqrt ( errnm[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) ); } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void exact( int i, int j, int k, double u000ijk[5] ) { /*-------------------------------------------------------------------- c c compute the exact solution at (i,j,k) c --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int m; double xi, eta, zeta; xi = ((double)i) / (nx0 - 1); eta = ((double)j) / (ny0 - 1); zeta = ((double)k) / (nz - 1); #pragma omp 
parallel for firstprivate(zeta ,eta ,xi ,u000ijk ,m ) for (m = 0; m < 5; m++) { u000ijk[m] = ce[m][0] + ce[m][1] * xi + ce[m][2] * eta + ce[m][3] * zeta + ce[m][4] * xi * xi + ce[m][5] * eta * eta + ce[m][6] * zeta * zeta + ce[m][7] * xi * xi * xi + ce[m][8] * eta * eta * eta + ce[m][9] * zeta * zeta * zeta + ce[m][10] * xi * xi * xi * xi + ce[m][11] * eta * eta * eta * eta + ce[m][12] * zeta * zeta * zeta * zeta; } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacld(int k) { /*-------------------------------------------------------------------- c compute the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j; double r43; double c1345; double c34; double tmp1, tmp2, tmp3; r43 = ( 4.0 / 3.0 ); c1345 = C1 * C3 * C4 * C5; c34 = C3 * C4; #pragma omp for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tz2 ,ty2 ,tx2 ,jst ,jend ,i ) for (j = jst; j <= jend; j++) { /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 ); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][1] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) ); d[i][j][1][1] = 1.0 + dt * 2.0 * ( tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 ); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] ) + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) ); d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 ); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][3] ) + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) ); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 ); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) ); d[i][j][4][1] = dt * 2.0 * ( tx1 * ( 
r43*c34 - c1345 ) * tmp2 * u[i][j][k][1] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] ); d[i][j][4][2] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] ); d[i][j][4][3] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] ); d[i][j][4][4] = 1.0 + dt * 2.0 * ( tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1 ) + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 ); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k-1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = - dt * tz1 * dz1; a[i][j][0][1] = 0.0; a[i][j][0][2] = 0.0; a[i][j][0][3] = - dt * tz2; a[i][j][0][4] = 0.0; a[i][j][1][0] = - dt * tz2 * ( - ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][1] ); a[i][j][1][1] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2 ; a[i][j][1][2] = 0.0; a[i][j][1][3] = - dt * tz2 * ( u[i][j][k-1][1] * tmp1 ); a[i][j][1][4] = 0.0; a[i][j][2][0] = - dt * tz2 * ( - ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k-1][2] ); a[i][j][2][1] = 0.0; a[i][j][2][2] = - dt * tz2 * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * ( c34 * tmp1 ) - dt * tz1 * dz3; a[i][j][2][3] = - dt * tz2 * ( u[i][j][k-1][2] * tmp1 ); a[i][j][2][4] = 0.0; a[i][j][3][0] = - dt * tz2 * ( - ( u[i][j][k-1][3] * tmp1 ) *( u[i][j][k-1][3] * tmp1 ) + 0.50 * C2 * ( ( u[i][j][k-1][1] * u[i][j][k-1][1] + u[i][j][k-1][2] * u[i][j][k-1][2] + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 ) ) - dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k-1][3] ); a[i][j][3][1] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][1] * tmp1 ) ); a[i][j][3][2] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][2] * tmp1 ) ); a[i][j][3][3] = - dt * tz2 * ( 2.0 - C2 ) * ( u[i][j][k-1][3] * tmp1 ) - dt * tz1 * ( r43 * c34 * tmp1 ) - dt * tz1 * dz4; a[i][j][3][4] = - dt * tz2 * C2; a[i][j][4][0] = - dt * tz2 * ( ( C2 * ( u[i][j][k-1][1] * u[i][j][k-1][1] + u[i][j][k-1][2] * u[i][j][k-1][2] + u[i][j][k-1][3] * u[i][j][k-1][3] ) * tmp2 - C1 * ( u[i][j][k-1][4] * tmp1 ) ) * ( u[i][j][k-1][3] * tmp1 ) ) - dt * tz1 * ( - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][1]*u[i][j][k-1][1]) - ( c34 - c1345 ) * tmp3 * (u[i][j][k-1][2]*u[i][j][k-1][2]) - ( r43*c34 - c1345 )* tmp3 * (u[i][j][k-1][3]*u[i][j][k-1][3]) - c1345 * tmp2 * u[i][j][k-1][4] ); a[i][j][4][1] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][1]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][1]; a[i][j][4][2] = - dt * tz2 * ( - C2 * ( u[i][j][k-1][2]*u[i][j][k-1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k-1][2]; a[i][j][4][3] = - dt * tz2 * ( C1 * ( u[i][j][k-1][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j][k-1][1]*u[i][j][k-1][1] + u[i][j][k-1][2]*u[i][j][k-1][2] + 3.0*u[i][j][k-1][3]*u[i][j][k-1][3] ) * tmp2 ) ) - dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k-1][3]; a[i][j][4][4] = - dt * tz2 * ( C1 * ( u[i][j][k-1][3] * tmp1 ) ) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j-1][k][0]; tmp2 = 
tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = - dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = - dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = - dt * ty2 * ( - ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][1] ); b[i][j][1][1] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy2; b[i][j][1][2] = - dt * ty2 * ( u[i][j-1][k][1] * tmp1 ); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = - dt * ty2 * ( - ( u[i][j-1][k][2] * tmp1 ) *( u[i][j-1][k][2] * tmp1 ) + 0.50 * C2 * ( ( u[i][j-1][k][1] * u[i][j-1][k][1] + u[i][j-1][k][2] * u[i][j-1][k][2] + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2 ) ) - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j-1][k][2] ); b[i][j][2][1] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][1] * tmp1 ) ); b[i][j][2][2] = - dt * ty2 * ( ( 2.0 - C2 ) * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * ( r43 * c34 * tmp1 ) - dt * ty1 * dy3; b[i][j][2][3] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][3] * tmp1 ) ); b[i][j][2][4] = - dt * ty2 * C2; b[i][j][3][0] = - dt * ty2 * ( - ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j-1][k][3] ); b[i][j][3][1] = 0.0; b[i][j][3][2] = - dt * ty2 * ( u[i][j-1][k][3] * tmp1 ); b[i][j][3][3] = - dt * ty2 * ( u[i][j-1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = - dt * ty2 * ( ( C2 * ( u[i][j-1][k][1] * u[i][j-1][k][1] + u[i][j-1][k][2] * u[i][j-1][k][2] + u[i][j-1][k][3] * u[i][j-1][k][3] ) * tmp2 - C1 * ( u[i][j-1][k][4] * tmp1 ) ) * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * ( - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][1])) - ( r43*c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][2])) - ( c34 - c1345 )*tmp3*(pow2(u[i][j-1][k][3])) - c1345*tmp2*u[i][j-1][k][4] ); b[i][j][4][1] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][1]*u[i][j-1][k][2] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][1]; b[i][j][4][2] = - dt * ty2 * ( C1 * ( u[i][j-1][k][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j-1][k][1]*u[i][j-1][k][1] + 3.0 * u[i][j-1][k][2]*u[i][j-1][k][2] + u[i][j-1][k][3]*u[i][j-1][k][3] ) * tmp2 ) ) - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j-1][k][2]; b[i][j][4][3] = - dt * ty2 * ( - C2 * ( u[i][j-1][k][2]*u[i][j-1][k][3] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j-1][k][3]; b[i][j][4][4] = - dt * ty2 * ( C1 * ( u[i][j-1][k][2] * tmp1 ) ) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i-1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = - dt * tx1 * dx1; c[i][j][0][1] = - dt * tx2; c[i][j][0][2] = 0.0; c[i][j][0][3] = 0.0; c[i][j][0][4] = 0.0; c[i][j][1][0] = - dt * tx2 * ( - ( u[i-1][j][k][1] * tmp1 ) *( u[i-1][j][k][1] * tmp1 ) + C2 * 0.50 * ( u[i-1][j][k][1] * u[i-1][j][k][1] + u[i-1][j][k][2] * u[i-1][j][k][2] + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - r43 * c34 * tmp2 * u[i-1][j][k][1] ); c[i][j][1][1] = - dt * tx2 * ( ( 2.0 - C2 ) * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * ( r43 * c34 * tmp1 ) - dt * tx1 * dx2; c[i][j][1][2] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][2] * tmp1 ) ); c[i][j][1][3] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][3] * tmp1 ) ); c[i][j][1][4] = - dt * tx2 * C2; c[i][j][2][0] = - dt * tx2 * ( - ( u[i-1][j][k][1] * u[i-1][j][k][2] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][2] ); c[i][j][2][1] = - dt * tx2 * ( 
u[i-1][j][k][2] * tmp1 ); c[i][j][2][2] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx3; c[i][j][2][3] = 0.0; c[i][j][2][4] = 0.0; c[i][j][3][0] = - dt * tx2 * ( - ( u[i-1][j][k][1]*u[i-1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i-1][j][k][3] ); c[i][j][3][1] = - dt * tx2 * ( u[i-1][j][k][3] * tmp1 ); c[i][j][3][2] = 0.0; c[i][j][3][3] = - dt * tx2 * ( u[i-1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4; c[i][j][3][4] = 0.0; c[i][j][4][0] = - dt * tx2 * ( ( C2 * ( u[i-1][j][k][1] * u[i-1][j][k][1] + u[i-1][j][k][2] * u[i-1][j][k][2] + u[i-1][j][k][3] * u[i-1][j][k][3] ) * tmp2 - C1 * ( u[i-1][j][k][4] * tmp1 ) ) * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i-1][j][k][3]) ) - c1345 * tmp2 * u[i-1][j][k][4] ); c[i][j][4][1] = - dt * tx2 * ( C1 * ( u[i-1][j][k][4] * tmp1 ) - 0.50 * C2 * ( ( 3.0*u[i-1][j][k][1]*u[i-1][j][k][1] + u[i-1][j][k][2]*u[i-1][j][k][2] + u[i-1][j][k][3]*u[i-1][j][k][3] ) * tmp2 ) ) - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i-1][j][k][1]; c[i][j][4][2] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][2]*u[i-1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][2]; c[i][j][4][3] = - dt * tx2 * ( - C2 * ( u[i-1][j][k][3]*u[i-1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i-1][j][k][3]; c[i][j][4][4] = - dt * tx2 * ( C1 * ( u[i-1][j][k][1] * tmp1 ) ) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void jacu(int k) { /*-------------------------------------------------------------------- c compute the upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j; double r43; double c1345; double c34; double tmp1, tmp2, tmp3; r43 = ( 4.0 / 3.0 ); c1345 = C1 * C3 * C4 * C5; c34 = C3 * C4; #if defined(_OPENMP) #pragma omp for for (i = iend; i >= ist; i--) { #pragma omp parallel for firstprivate(ist ,iend ,j ,tmp1 ,tmp2 ,tmp3 ,k ,dz1 ,tz1 ,dy1 ,ty1 ,dx1 ,tx1 ,dt ,dz2 ,dy2 ,dx2 ,dz3 ,dy3 ,dx3 ,dz4 ,dy4 ,dx4 ,dz5 ,dy5 ,dx5 ,tx2 ,ty2 ,tz2 ,jst ,jend ,i ) for (j = jend; j >= jst; j--) { #else for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #endif /*-------------------------------------------------------------------- c form the block daigonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; d[i][j][0][0] = 1.0 + dt * 2.0 * ( tx1 * dx1 + ty1 * dy1 + tz1 * dz1 ); d[i][j][0][1] = 0.0; d[i][j][0][2] = 0.0; d[i][j][0][3] = 0.0; d[i][j][0][4] = 0.0; d[i][j][1][0] = dt * 2.0 * ( tx1 * ( - r43 * c34 * tmp2 * u[i][j][k][1] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][1] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][1] ) ); d[i][j][1][1] = 1.0 + dt * 2.0 * ( tx1 * r43 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx2 + ty1 * dy2 + tz1 * dz2 ); d[i][j][1][2] = 0.0; d[i][j][1][3] = 0.0; d[i][j][1][4] = 0.0; d[i][j][2][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][2] ) + ty1 * ( - r43 * c34 * tmp2 * u[i][j][k][2] ) + tz1 * ( - c34 * tmp2 * u[i][j][k][2] ) ); 
d[i][j][2][1] = 0.0; d[i][j][2][2] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * r43 * c34 * tmp1 + tz1 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx3 + ty1 * dy3 + tz1 * dz3 ); d[i][j][2][3] = 0.0; d[i][j][2][4] = 0.0; d[i][j][3][0] = dt * 2.0 * ( tx1 * ( - c34 * tmp2 * u[i][j][k][3] ) + ty1 * ( - c34 * tmp2 * u[i][j][k][3] ) + tz1 * ( - r43 * c34 * tmp2 * u[i][j][k][3] ) ); d[i][j][3][1] = 0.0; d[i][j][3][2] = 0.0; d[i][j][3][3] = 1.0 + dt * 2.0 * ( tx1 * c34 * tmp1 + ty1 * c34 * tmp1 + tz1 * r43 * c34 * tmp1 ) + dt * 2.0 * ( tx1 * dx4 + ty1 * dy4 + tz1 * dz4 ); d[i][j][3][4] = 0.0; d[i][j][4][0] = dt * 2.0 * ( tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + ty1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) + tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][2]) ) - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k][3]) ) - ( c1345 ) * tmp2 * u[i][j][k][4] ) ); d[i][j][4][1] = dt * 2.0 * ( tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][1] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][1] ); d[i][j][4][2] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] + ty1 * ( r43*c34 -c1345 ) * tmp2 * u[i][j][k][2] + tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][2] ); d[i][j][4][3] = dt * 2.0 * ( tx1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + ty1 * ( c34 - c1345 ) * tmp2 * u[i][j][k][3] + tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k][3] ); d[i][j][4][4] = 1.0 + dt * 2.0 * ( tx1 * c1345 * tmp1 + ty1 * c1345 * tmp1 + tz1 * c1345 * tmp1 ) + dt * 2.0 * ( tx1 * dx5 + ty1 * dy5 + tz1 * dz5 ); /*-------------------------------------------------------------------- c form the first block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i+1][j][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; a[i][j][0][0] = - dt * tx1 * dx1; a[i][j][0][1] = dt * tx2; a[i][j][0][2] = 0.0; a[i][j][0][3] = 0.0; a[i][j][0][4] = 0.0; a[i][j][1][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * tmp1 ) *( u[i+1][j][k][1] * tmp1 ) + C2 * 0.50 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - r43 * c34 * tmp2 * u[i+1][j][k][1] ); a[i][j][1][1] = dt * tx2 * ( ( 2.0 - C2 ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( r43 * c34 * tmp1 ) - dt * tx1 * dx2; a[i][j][1][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2] * tmp1 ) ); a[i][j][1][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3] * tmp1 ) ); a[i][j][1][4] = dt * tx2 * C2 ; a[i][j][2][0] = dt * tx2 * ( - ( u[i+1][j][k][1] * u[i+1][j][k][2] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][2] ); a[i][j][2][1] = dt * tx2 * ( u[i+1][j][k][2] * tmp1 ); a[i][j][2][2] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx3; a[i][j][2][3] = 0.0; a[i][j][2][4] = 0.0; a[i][j][3][0] = dt * tx2 * ( - ( u[i+1][j][k][1]*u[i+1][j][k][3] ) * tmp2 ) - dt * tx1 * ( - c34 * tmp2 * u[i+1][j][k][3] ); a[i][j][3][1] = dt * tx2 * ( u[i+1][j][k][3] * tmp1 ); a[i][j][3][2] = 0.0; a[i][j][3][3] = dt * tx2 * ( u[i+1][j][k][1] * tmp1 ) - dt * tx1 * ( c34 * tmp1 ) - dt * tx1 * dx4; a[i][j][3][4] = 0.0; a[i][j][4][0] = dt * tx2 * ( ( C2 * ( u[i+1][j][k][1] * u[i+1][j][k][1] + 
u[i+1][j][k][2] * u[i+1][j][k][2] + u[i+1][j][k][3] * u[i+1][j][k][3] ) * tmp2 - C1 * ( u[i+1][j][k][4] * tmp1 ) ) * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * ( - ( r43*c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][2]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i+1][j][k][3]) ) - c1345 * tmp2 * u[i+1][j][k][4] ); a[i][j][4][1] = dt * tx2 * ( C1 * ( u[i+1][j][k][4] * tmp1 ) - 0.50 * C2 * ( ( 3.0*u[i+1][j][k][1]*u[i+1][j][k][1] + u[i+1][j][k][2]*u[i+1][j][k][2] + u[i+1][j][k][3]*u[i+1][j][k][3] ) * tmp2 ) ) - dt * tx1 * ( r43*c34 - c1345 ) * tmp2 * u[i+1][j][k][1]; a[i][j][4][2] = dt * tx2 * ( - C2 * ( u[i+1][j][k][2]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][2]; a[i][j][4][3] = dt * tx2 * ( - C2 * ( u[i+1][j][k][3]*u[i+1][j][k][1] ) * tmp2 ) - dt * tx1 * ( c34 - c1345 ) * tmp2 * u[i+1][j][k][3]; a[i][j][4][4] = dt * tx2 * ( C1 * ( u[i+1][j][k][1] * tmp1 ) ) - dt * tx1 * c1345 * tmp1 - dt * tx1 * dx5; /*-------------------------------------------------------------------- c form the second block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j+1][k][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; b[i][j][0][0] = - dt * ty1 * dy1; b[i][j][0][1] = 0.0; b[i][j][0][2] = dt * ty2; b[i][j][0][3] = 0.0; b[i][j][0][4] = 0.0; b[i][j][1][0] = dt * ty2 * ( - ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][1] ); b[i][j][1][1] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy2; b[i][j][1][2] = dt * ty2 * ( u[i][j+1][k][1] * tmp1 ); b[i][j][1][3] = 0.0; b[i][j][1][4] = 0.0; b[i][j][2][0] = dt * ty2 * ( - ( u[i][j+1][k][2] * tmp1 ) *( u[i][j+1][k][2] * tmp1 ) + 0.50 * C2 * ( ( u[i][j+1][k][1] * u[i][j+1][k][1] + u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( - r43 * c34 * tmp2 * u[i][j+1][k][2] ); b[i][j][2][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1] * tmp1 ) ); b[i][j][2][2] = dt * ty2 * ( ( 2.0 - C2 ) * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( r43 * c34 * tmp1 ) - dt * ty1 * dy3; b[i][j][2][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][3] * tmp1 ) ); b[i][j][2][4] = dt * ty2 * C2; b[i][j][3][0] = dt * ty2 * ( - ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( - c34 * tmp2 * u[i][j+1][k][3] ); b[i][j][3][1] = 0.0; b[i][j][3][2] = dt * ty2 * ( u[i][j+1][k][3] * tmp1 ); b[i][j][3][3] = dt * ty2 * ( u[i][j+1][k][2] * tmp1 ) - dt * ty1 * ( c34 * tmp1 ) - dt * ty1 * dy4; b[i][j][3][4] = 0.0; b[i][j][4][0] = dt * ty2 * ( ( C2 * ( u[i][j+1][k][1] * u[i][j+1][k][1] + u[i][j+1][k][2] * u[i][j+1][k][2] + u[i][j+1][k][3] * u[i][j+1][k][3] ) * tmp2 - C1 * ( u[i][j+1][k][4] * tmp1 ) ) * ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * ( - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][1]) ) - ( r43*c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][2]) ) - ( c34 - c1345 )*tmp3*( pow2(u[i][j+1][k][3]) ) - c1345*tmp2*u[i][j+1][k][4] ); b[i][j][4][1] = dt * ty2 * ( - C2 * ( u[i][j+1][k][1]*u[i][j+1][k][2] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][1]; b[i][j][4][2] = dt * ty2 * ( C1 * ( u[i][j+1][k][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j+1][k][1]*u[i][j+1][k][1] + 3.0 * u[i][j+1][k][2]*u[i][j+1][k][2] + u[i][j+1][k][3]*u[i][j+1][k][3] ) * tmp2 ) ) - dt * ty1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j+1][k][2]; b[i][j][4][3] = dt * ty2 * ( - C2 * ( u[i][j+1][k][2]*u[i][j+1][k][3] ) * tmp2 ) - dt * ty1 * ( c34 - c1345 ) * tmp2 * u[i][j+1][k][3]; b[i][j][4][4] = dt * ty2 * ( C1 
* ( u[i][j+1][k][2] * tmp1 ) ) - dt * ty1 * c1345 * tmp1 - dt * ty1 * dy5; /*-------------------------------------------------------------------- c form the third block sub-diagonal --------------------------------------------------------------------*/ tmp1 = 1.0 / u[i][j][k+1][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; c[i][j][0][0] = - dt * tz1 * dz1; c[i][j][0][1] = 0.0; c[i][j][0][2] = 0.0; c[i][j][0][3] = dt * tz2; c[i][j][0][4] = 0.0; c[i][j][1][0] = dt * tz2 * ( - ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][1] ); c[i][j][1][1] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * c34 * tmp1 - dt * tz1 * dz2 ; c[i][j][1][2] = 0.0; c[i][j][1][3] = dt * tz2 * ( u[i][j][k+1][1] * tmp1 ); c[i][j][1][4] = 0.0; c[i][j][2][0] = dt * tz2 * ( - ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( - c34 * tmp2 * u[i][j][k+1][2] ); c[i][j][2][1] = 0.0; c[i][j][2][2] = dt * tz2 * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( c34 * tmp1 ) - dt * tz1 * dz3; c[i][j][2][3] = dt * tz2 * ( u[i][j][k+1][2] * tmp1 ); c[i][j][2][4] = 0.0; c[i][j][3][0] = dt * tz2 * ( - ( u[i][j][k+1][3] * tmp1 ) *( u[i][j][k+1][3] * tmp1 ) + 0.50 * C2 * ( ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 ) ) - dt * tz1 * ( - r43 * c34 * tmp2 * u[i][j][k+1][3] ); c[i][j][3][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1] * tmp1 ) ); c[i][j][3][2] = dt * tz2 * ( - C2 * ( u[i][j][k+1][2] * tmp1 ) ); c[i][j][3][3] = dt * tz2 * ( 2.0 - C2 ) * ( u[i][j][k+1][3] * tmp1 ) - dt * tz1 * ( r43 * c34 * tmp1 ) - dt * tz1 * dz4; c[i][j][3][4] = dt * tz2 * C2; c[i][j][4][0] = dt * tz2 * ( ( C2 * ( u[i][j][k+1][1] * u[i][j][k+1][1] + u[i][j][k+1][2] * u[i][j][k+1][2] + u[i][j][k+1][3] * u[i][j][k+1][3] ) * tmp2 - C1 * ( u[i][j][k+1][4] * tmp1 ) ) * ( u[i][j][k+1][3] * tmp1 ) ) - dt * tz1 * ( - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][1]) ) - ( c34 - c1345 ) * tmp3 * ( pow2(u[i][j][k+1][2]) ) - ( r43*c34 - c1345 )* tmp3 * ( pow2(u[i][j][k+1][3]) ) - c1345 * tmp2 * u[i][j][k+1][4] ); c[i][j][4][1] = dt * tz2 * ( - C2 * ( u[i][j][k+1][1]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][1]; c[i][j][4][2] = dt * tz2 * ( - C2 * ( u[i][j][k+1][2]*u[i][j][k+1][3] ) * tmp2 ) - dt * tz1 * ( c34 - c1345 ) * tmp2 * u[i][j][k+1][2]; c[i][j][4][3] = dt * tz2 * ( C1 * ( u[i][j][k+1][4] * tmp1 ) - 0.50 * C2 * ( ( u[i][j][k+1][1]*u[i][j][k+1][1] + u[i][j][k+1][2]*u[i][j][k+1][2] + 3.0*u[i][j][k+1][3]*u[i][j][k+1][3] ) * tmp2 ) ) - dt * tz1 * ( r43*c34 - c1345 ) * tmp2 * u[i][j][k+1][3]; c[i][j][4][4] = dt * tz2 * ( C1 * ( u[i][j][k+1][3] * tmp1 ) ) - dt * tz1 * c1345 * tmp1 - dt * tz1 * dz5; } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void l2norm (int nx0, int ny0, int nz0, int ist, int iend, int jst, int jend, /*-------------------------------------------------------------------- c To improve cache performance, second two dimensions padded by 1 c for even number sizes only. Only needed in v. --------------------------------------------------------------------*/ double v[ISIZ1][ISIZ2/2*2+1][ISIZ3/2*2+1][5], double sum[5]) { { /*-------------------------------------------------------------------- c to compute the l2-norm of vector v. 
--------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  double sum0=0.0, sum1=0.0, sum2=0.0, sum3=0.0, sum4=0.0;

  for (m = 0; m < 5; m++) {
    sum[m] = 0.0;
  }

/* one parallel loop with a sum reduction: the inner loop indices are
   thread-private and each thread accumulates into private partial
   sums that the reduction clause combines at the end */
#pragma omp parallel for private(j, k) reduction(+:sum0,sum1,sum2,sum3,sum4)
  for (i = ist; i <= iend; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz0-2; k++) {
        sum0 = sum0 + v[i][j][k][0] * v[i][j][k][0];
        sum1 = sum1 + v[i][j][k][1] * v[i][j][k][1];
        sum2 = sum2 + v[i][j][k][2] * v[i][j][k][2];
        sum3 = sum3 + v[i][j][k][3] * v[i][j][k][3];
        sum4 = sum4 + v[i][j][k][4] * v[i][j][k][4];
      }
    }
  }

  sum[0] += sum0;
  sum[1] += sum1;
  sum[2] += sum2;
  sum[3] += sum3;
  sum[4] += sum4;

  for (m = 0; m < 5; m++) {
    sum[m] = sqrt ( sum[m] / ( (nx0-2)*(ny0-2)*(nz0-2) ) );
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void pintgr(void) {

/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k;
  int ibeg, ifin, ifin1;
  int jbeg, jfin, jfin1;
  int iglob, iglob1, iglob2;
  int jglob, jglob1, jglob2;
  double phi1[ISIZ2+2][ISIZ3+2];  /* phi1(0:isiz2+1,0:isiz3+1) */
  double phi2[ISIZ2+2][ISIZ3+2];  /* phi2(0:isiz2+1,0:isiz3+1) */
  double frc1, frc2, frc3;

/*--------------------------------------------------------------------
c  set up the sub-domains for integration in each processor
--------------------------------------------------------------------*/
  ibeg = nx;
  ifin = 0;
  iglob1 = -1;
  iglob2 = nx-1;
  if (iglob1 >= ii1 && iglob2 < ii2+nx) ibeg = 0;
  if (iglob1 >= ii1-nx && iglob2 <= ii2) ifin = nx;
  if (ii1 >= iglob1 && ii1 <= iglob2) ibeg = ii1;
  if (ii2 >= iglob1 && ii2 <= iglob2) ifin = ii2;
  jbeg = ny;
  jfin = -1;
  jglob1 = 0;
  jglob2 = ny-1;
  if (jglob1 >= ji1 && jglob2 < ji2+ny) jbeg = 0;
  if (jglob1 > ji1-ny && jglob2 <= ji2) jfin = ny;
  if (ji1 >= jglob1 && ji1 <= jglob2) jbeg = ji1;
  if (ji2 >= jglob1 && ji2 <= jglob2) jfin = ji2;
  ifin1 = ifin;
  jfin1 = jfin;
  if (ifin1 == ii2) ifin1 = ifin -1;
  if (jfin1 == ji2) jfin1 = jfin -1;

/*--------------------------------------------------------------------
c  initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private(k)
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

/* iglob, jglob and the inner indices are written inside the loop and
   therefore must be private to each thread */
#pragma omp parallel for private(j, k, iglob, jglob)
  for (i = ibeg; i <= ifin; i++) {
    iglob = i;
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      k = ki1;
      phi1[i][j] = C2*( u[i][j][k][4]
                        - 0.50 * ( pow2(u[i][j][k][1])
                                   + pow2(u[i][j][k][2])
                                   + pow2(u[i][j][k][3]) ) / u[i][j][k][0] );
      k = ki2;
      phi2[i][j] = C2*( u[i][j][k][4]
                        - 0.50 * ( pow2(u[i][j][k][1])
                                   + pow2(u[i][j][k][2])
                                   + pow2(u[i][j][k][3]) ) / u[i][j][k][0] );
    }
  }

  frc1 = 0.0;
#pragma omp parallel for private(j) reduction(+:frc1)
  for (i = ibeg; i <= ifin1; i++) {
    for (j = jbeg; j <= jfin1; j++) {
      frc1 = frc1 + ( phi1[i][j] + phi1[i+1][j]
                      + phi1[i][j+1] + phi1[i+1][j+1]
                      + phi2[i][j] + phi2[i+1][j]
                      + phi2[i][j+1] + phi2[i+1][j+1] );
    }
  }
  frc1 = dxi * deta * frc1;

/*--------------------------------------------------------------------
c  initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private(k)
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  jglob = jbeg;
  if (jglob == ji1) {
#pragma omp parallel for private(k, iglob)
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
        phi1[i][k] = C2*( u[i][jbeg][k][4]
                          - 0.50 * ( pow2(u[i][jbeg][k][1])
                                     + pow2(u[i][jbeg][k][2])
                                     + pow2(u[i][jbeg][k][3]) ) / u[i][jbeg][k][0] );
      }
    }
  }
  jglob = jfin;
  if (jglob == ji2) {
#pragma omp parallel for private(k, iglob)
    for (i = ibeg; i <= ifin; i++) {
      iglob = i;
      for (k = ki1; k <= ki2; k++) {
        phi2[i][k] = C2*( u[i][jfin][k][4]
                          - 0.50 * ( pow2(u[i][jfin][k][1])
                                     + pow2(u[i][jfin][k][2])
                                     + pow2(u[i][jfin][k][3]) ) / u[i][jfin][k][0] );
      }
    }
  }

  frc2 = 0.0;
#pragma omp parallel for private(k) reduction(+:frc2)
  for (i = ibeg; i <= ifin1; i++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc2 = frc2 + ( phi1[i][k] + phi1[i+1][k]
                      + phi1[i][k+1] + phi1[i+1][k+1]
                      + phi2[i][k] + phi2[i+1][k]
                      + phi2[i][k+1] + phi2[i+1][k+1] );
    }
  }
  frc2 = dxi * dzeta * frc2;

/*--------------------------------------------------------------------
c  initialize
--------------------------------------------------------------------*/
#pragma omp parallel for private(k)
  for (i = 0; i <= ISIZ2+1; i++) {
    for (k = 0; k <= ISIZ3+1; k++) {
      phi1[i][k] = 0.0;
      phi2[i][k] = 0.0;
    }
  }

  iglob = ibeg;
  if (iglob == ii1) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
        phi1[j][k] = C2*( u[ibeg][j][k][4]
                          - 0.50 * ( pow2(u[ibeg][j][k][1])
                                     + pow2(u[ibeg][j][k][2])
                                     + pow2(u[ibeg][j][k][3]) ) / u[ibeg][j][k][0] );
      }
    }
  }
  iglob = ifin;
  if (iglob == ii2) {
    for (j = jbeg; j <= jfin; j++) {
      jglob = j;
      for (k = ki1; k <= ki2; k++) {
        phi2[j][k] = C2*( u[ifin][j][k][4]
                          - 0.50 * ( pow2(u[ifin][j][k][1])
                                     + pow2(u[ifin][j][k][2])
                                     + pow2(u[ifin][j][k][3]) ) / u[ifin][j][k][0] );
      }
    }
  }

  frc3 = 0.0;
  for (j = jbeg; j <= jfin1; j++) {
    for (k = ki1; k <= ki2-1; k++) {
      frc3 = frc3 + ( phi1[j][k] + phi1[j+1][k]
                      + phi1[j][k+1] + phi1[j+1][k+1]
                      + phi2[j][k] + phi2[j+1][k]
                      + phi2[j][k+1] + phi2[j+1][k+1] );
    }
  }
  frc3 = deta * dzeta * frc3;
  frc = 0.25 * ( frc1 + frc2 + frc3 );
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void read_input(void) {
  FILE *fp;

/*--------------------------------------------------------------------
c  if input file does not exist, it uses defaults
c    ipr    = 1 for detailed progress output
c    inorm  = how often the norm is printed (once every inorm iterations)
c    itmax  = number of pseudo time steps
c    dt     = time step
c    omega  = over-relaxation factor for SSOR
c    tolrsd = steady state residual tolerance levels
c    nx, ny, nz = number of grid points in x, y, z directions
--------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
         " - LU Benchmark\n\n");

  fp = fopen("inputlu.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputlu.data\n");
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d%d", &ipr, &inorm);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d", &itmax);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &dt);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf", &omega);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%lf%lf%lf%lf%lf",
           &tolrsd[0], &tolrsd[1], &tolrsd[2], &tolrsd[3], &tolrsd[4]);
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    while(fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d", &nx0, &ny0, &nz0);
    while(fgetc(fp) != '\n');
    fclose(fp);
  } else {
    ipr = IPR_DEFAULT;
    inorm = INORM_DEFAULT;
    itmax = ITMAX_DEFAULT;
    dt = DT_DEFAULT;
    omega = OMEGA_DEFAULT;
    tolrsd[0] = TOLRSD1_DEF;
    tolrsd[1] = TOLRSD2_DEF;
    tolrsd[2] = TOLRSD3_DEF;
    tolrsd[3] = TOLRSD4_DEF;
    tolrsd[4] = TOLRSD5_DEF;
    nx0 = ISIZ1;
    ny0 = ISIZ2;
    nz0 = ISIZ3;
  }

/*--------------------------------------------------------------------
c  check problem size
--------------------------------------------------------------------*/
  if ( nx0 < 4 || ny0 < 4 || nz0 < 4 ) {
    printf(" PROBLEM SIZE IS TOO SMALL - \n"
           " SET EACH OF NX, NY AND NZ AT LEAST EQUAL TO 5\n");
    exit(1);
  }
  if ( nx0 > ISIZ1 || ny0 > ISIZ2 || nz0 > ISIZ3 ) {
    printf(" PROBLEM SIZE IS TOO LARGE - \n"
           " NX, NY AND NZ SHOULD BE EQUAL TO \n"
           " ISIZ1, ISIZ2 AND ISIZ3 RESPECTIVELY\n");
    exit(1);
  }

  printf(" Size: %3dx%3dx%3d\n", nx0, ny0, nz0);
  printf(" Iterations: %3d\n", itmax);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void rhs(void) {
{

/*--------------------------------------------------------------------
c  compute the right hand sides
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int L1, L2;
  int ist1, iend1;
  int jst1, jend1;
  double q;
  double u21, u31, u41;
  double tmp;
  double u21i, u31i, u41i, u51i;
  double u21j, u31j, u41j, u51j;
  double u21k, u31k, u41k, u51k;
  double u21im1, u31im1, u41im1, u51im1;
  double u21jm1, u31jm1, u41jm1, u51jm1;
  double u21km1, u31km1, u41km1, u51km1;

#pragma omp parallel for private(j, k, m)
  for (i = 0; i <= nx-1; i++) {
    for (j = 0; j <= ny-1; j++) {
      for (k = 0; k <= nz-1; k++) {
        for (m = 0; m < 5; m++) {
          rsd[i][j][k][m] = - frct[i][j][k][m];
        }
      }
    }
  }

/*--------------------------------------------------------------------
c  xi-direction flux differences
--------------------------------------------------------------------*/
  L1 = 0;
  L2 = nx-1;

#pragma omp parallel for private(j, k, u21, q)
  for (i = L1; i <= L2; i++) {
    for (j = jst; j <= jend; j++) {
      for (k = 1; k <= nz - 2; k++) {
        flux[i][j][k][0] = u[i][j][k][1];
        u21 = u[i][j][k][1] / u[i][j][k][0];
        q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1]
                     + u[i][j][k][2] * u[i][j][k][2]
                     + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0];
        flux[i][j][k][1] = u[i][j][k][1] * u21 + C2 * ( u[i][j][k][4] - q );
        flux[i][j][k][2] = u[i][j][k][2] * u21;
        flux[i][j][k][3] = u[i][j][k][3] * u21;
        flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u21;
      }
    }
  }

/* every index and viscous-flux temporary written inside this loop
   body is private to its thread */
#pragma omp parallel for private(i, k, m, L2, tmp, u21i, u31i, u41i, u51i, u21im1, u31im1, u41im1, u51im1, ist1, iend1)
  for (j = jst; j <= jend; j++) {
    for (k = 1; k <= nz - 2; k++) {
      for (i = ist; i <= iend; i++) {
        for (m = 0; m < 5; m++) {
          rsd[i][j][k][m] = rsd[i][j][k][m]
            - tx2 * ( flux[i+1][j][k][m] - flux[i-1][j][k][m] );
        }
      }
      L2 = nx-1;
      for (i = ist; i <= L2; i++) {
        tmp = 1.0 / u[i][j][k][0];
        u21i = tmp * u[i][j][k][1];
        u31i = tmp * u[i][j][k][2];
        u41i = tmp * u[i][j][k][3];
        u51i = tmp * u[i][j][k][4];
        tmp = 1.0 / u[i-1][j][k][0];
        u21im1 = tmp * u[i-1][j][k][1];
        u31im1 = tmp * u[i-1][j][k][2];
        u41im1 = tmp * u[i-1][j][k][3];
        u51im1 = tmp * u[i-1][j][k][4];
        flux[i][j][k][1] = (4.0/3.0) * tx3 * (u21i-u21im1);
        flux[i][j][k][2] = tx3 * ( u31i - u31im1 );
        flux[i][j][k][3] = tx3 * ( u41i - u41im1 );
        flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 )
          * tx3 * ( ( pow2(u21i) + pow2(u31i) + pow2(u41i) )
                    - ( pow2(u21im1) + pow2(u31im1) + pow2(u41im1) ) )
          + (1.0/6.0) * tx3 * ( pow2(u21i) - pow2(u21im1) )
          + C1 * C5 * tx3 * ( u51i - u51im1 );
      }
      for (i = ist; i <= iend; i++) {
        rsd[i][j][k][0] = rsd[i][j][k][0]
          + dx1 * tx1 * ( u[i-1][j][k][0] - 2.0 * u[i][j][k][0] + u[i+1][j][k][0] );
        rsd[i][j][k][1] = rsd[i][j][k][1]
          + tx3 * C3 * C4 * ( flux[i+1][j][k][1] - flux[i][j][k][1] )
          + dx2 * tx1 * ( u[i-1][j][k][1] - 2.0 * u[i][j][k][1] + u[i+1][j][k][1] );
        rsd[i][j][k][2] = rsd[i][j][k][2]
          + tx3 * C3 * C4 * ( flux[i+1][j][k][2] - flux[i][j][k][2] )
          + dx3 * tx1 * ( u[i-1][j][k][2] - 2.0 * u[i][j][k][2] + u[i+1][j][k][2] );
        rsd[i][j][k][3] = rsd[i][j][k][3]
          + tx3 * C3 * C4 * ( flux[i+1][j][k][3] - flux[i][j][k][3] )
          + dx4 * tx1 * ( u[i-1][j][k][3] - 2.0 * u[i][j][k][3] + u[i+1][j][k][3] );
        rsd[i][j][k][4] = rsd[i][j][k][4]
          + tx3 * C3 * C4 * ( flux[i+1][j][k][4] - flux[i][j][k][4] )
          + dx5 * tx1 * ( u[i-1][j][k][4] - 2.0 * u[i][j][k][4] + u[i+1][j][k][4] );
      }

/*--------------------------------------------------------------------
c  Fourth-order dissipation
--------------------------------------------------------------------*/
      for (m = 0; m < 5; m++) {
        rsd[1][j][k][m] =
rsd[1][j][k][m] - dssp * ( + 5.0 * u[1][j][k][m] - 4.0 * u[2][j][k][m] + u[3][j][k][m] ); rsd[2][j][k][m] = rsd[2][j][k][m] - dssp * ( - 4.0 * u[1][j][k][m] + 6.0 * u[2][j][k][m] - 4.0 * u[3][j][k][m] + u[4][j][k][m] ); } ist1 = 3; iend1 = nx - 4; for (i = ist1; i <= iend1; i++) { for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i-2][j][k][m] - 4.0 * u[i-1][j][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i+1][j][k][m] + u[i+2][j][k][m] ); } } for (m = 0; m < 5; m++) { rsd[nx-3][j][k][m] = rsd[nx-3][j][k][m] - dssp * ( u[nx-5][j][k][m] - 4.0 * u[nx-4][j][k][m] + 6.0 * u[nx-3][j][k][m] - 4.0 * u[nx-2][j][k][m] ); rsd[nx-2][j][k][m] = rsd[nx-2][j][k][m] - dssp * ( u[nx-4][j][k][m] - 4.0 * u[nx-3][j][k][m] + 5.0 * u[nx-2][j][k][m] ); } } } /*-------------------------------------------------------------------- c eta-direction flux differences --------------------------------------------------------------------*/ L1 = 0; L2 = ny-1; #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) for (j = L1; j <= L2; j++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,ny ,u31 ,q ,nz ,L2 ,i ) for (k = 1; k <= nz - 2; k++) { flux[i][j][k][0] = u[i][j][k][2]; u31 = u[i][j][k][2] / u[i][j][k][0]; q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u31; flux[i][j][k][2] = u[i][j][k][2] * u31 + C2 * (u[i][j][k][4]-q); flux[i][j][k][3] = u[i][j][k][3] * u31; flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u31; } } } #pragma omp parallel for for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - ty2 * ( flux[i][j+1][k][m] - flux[i][j-1][k][m] ); } } L2 = ny-1; #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) for (j = jst; j <= L2; j++) { tmp = 1.0 / u[i][j][k][0]; u21j = tmp * u[i][j][k][1]; u31j = tmp * u[i][j][k][2]; u41j = tmp * u[i][j][k][3]; u51j = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j-1][k][0]; u21jm1 = tmp * u[i][j-1][k][1]; u31jm1 = tmp * u[i][j-1][k][2]; u41jm1 = tmp * u[i][j-1][k][3]; u51jm1 = tmp * u[i][j-1][k][4]; flux[i][j][k][1] = ty3 * ( u21j - u21jm1 ); flux[i][j][k][2] = (4.0/3.0) * ty3 * (u31j-u31jm1); flux[i][j][k][3] = ty3 * ( u41j - u41jm1 ); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * ty3 * ( ( pow2(u21j) + pow2(u31j) + pow2(u41j) ) - ( pow2(u21jm1) + pow2(u31jm1) + pow2(u41jm1) ) ) + (1.0/6.0) * ty3 * ( pow2(u31j) - pow2(u31jm1) ) + C1 * C5 * ty3 * ( u51j - u51jm1 ); } #pragma omp parallel for firstprivate(iend ,m ,j ,ist ,k ,u21j ,u31j ,u41j ,u51j ,tmp ,u21jm1 ,u31jm1 ,u41jm1 ,u51jm1 ,ty2 ,jst ,jend ,ty3 ,L2 ,ty1 ,dy1 ,dy2 ,dy3 ,dy4 ,dy5 ,dssp ,ny ,nz ,i ) for (j 
= jst; j <= jend; j++) { rsd[i][j][k][0] = rsd[i][j][k][0] + dy1 * ty1 * ( u[i][j-1][k][0] - 2.0 * u[i][j][k][0] + u[i][j+1][k][0] ); rsd[i][j][k][1] = rsd[i][j][k][1] + ty3 * C3 * C4 * ( flux[i][j+1][k][1] - flux[i][j][k][1] ) + dy2 * ty1 * ( u[i][j-1][k][1] - 2.0 * u[i][j][k][1] + u[i][j+1][k][1] ); rsd[i][j][k][2] = rsd[i][j][k][2] + ty3 * C3 * C4 * ( flux[i][j+1][k][2] - flux[i][j][k][2] ) + dy3 * ty1 * ( u[i][j-1][k][2] - 2.0 * u[i][j][k][2] + u[i][j+1][k][2] ); rsd[i][j][k][3] = rsd[i][j][k][3] + ty3 * C3 * C4 * ( flux[i][j+1][k][3] - flux[i][j][k][3] ) + dy4 * ty1 * ( u[i][j-1][k][3] - 2.0 * u[i][j][k][3] + u[i][j+1][k][3] ); rsd[i][j][k][4] = rsd[i][j][k][4] + ty3 * C3 * C4 * ( flux[i][j+1][k][4] - flux[i][j][k][4] ) + dy5 * ty1 * ( u[i][j-1][k][4] - 2.0 * u[i][j][k][4] + u[i][j+1][k][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { rsd[i][1][k][m] = rsd[i][1][k][m] - dssp * ( + 5.0 * u[i][1][k][m] - 4.0 * u[i][2][k][m] + u[i][3][k][m] ); rsd[i][2][k][m] = rsd[i][2][k][m] - dssp * ( - 4.0 * u[i][1][k][m] + 6.0 * u[i][2][k][m] - 4.0 * u[i][3][k][m] + u[i][4][k][m] ); } jst1 = 3; jend1 = ny - 4; for (j = jst1; j <= jend1; j++) { for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i][j-2][k][m] - 4.0 * u[i][j-1][k][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j+1][k][m] + u[i][j+2][k][m] ); } } for (m = 0; m < 5; m++) { rsd[i][ny-3][k][m] = rsd[i][ny-3][k][m] - dssp * ( u[i][ny-5][k][m] - 4.0 * u[i][ny-4][k][m] + 6.0 * u[i][ny-3][k][m] - 4.0 * u[i][ny-2][k][m] ); rsd[i][ny-2][k][m] = rsd[i][ny-2][k][m] - dssp * ( u[i][ny-4][k][m] - 4.0 * u[i][ny-3][k][m] + 5.0 * u[i][ny-2][k][m] ); } } } /*-------------------------------------------------------------------- c zeta-direction flux differences --------------------------------------------------------------------*/ #pragma omp parallel for for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,u41 ,q ,k ,j ,i ) for (k = 0; k <= nz-1; k++) { flux[i][j][k][0] = u[i][j][k][3]; u41 = u[i][j][k][3] / u[i][j][k][0]; q = 0.50 * ( u[i][j][k][1] * u[i][j][k][1] + u[i][j][k][2] * u[i][j][k][2] + u[i][j][k][3] * u[i][j][k][3] ) / u[i][j][k][0]; flux[i][j][k][1] = u[i][j][k][1] * u41; flux[i][j][k][2] = u[i][j][k][2] * u41; flux[i][j][k][3] = u[i][j][k][3] * u41 + C2 * (u[i][j][k][4]-q); flux[i][j][k][4] = ( C1 * u[i][j][k][4] - C2 * q ) * u41; } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,tz2 ,k ,j ,i ) for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - tz2 * ( flux[i][j][k+1][m] - flux[i][j][k-1][m] ); } } #pragma omp parallel for firstprivate(nz ,ist ,jst ,u21k ,u31k ,u41k ,u51k ,tmp ,u21km1 ,u31km1 ,u41km1 ,u51km1 ,tz3 ,k ,j ,i ) for (k = 1; k <= nz-1; k++) { tmp = 1.0 / u[i][j][k][0]; u21k = tmp * u[i][j][k][1]; u31k = tmp * u[i][j][k][2]; u41k = tmp * u[i][j][k][3]; u51k = tmp * u[i][j][k][4]; tmp = 1.0 / u[i][j][k-1][0]; u21km1 = tmp * u[i][j][k-1][1]; u31km1 = tmp * u[i][j][k-1][2]; u41km1 = tmp * u[i][j][k-1][3]; u51km1 = tmp * u[i][j][k-1][4]; flux[i][j][k][1] = tz3 * ( u21k - u21km1 ); flux[i][j][k][2] = tz3 * ( u31k - u31km1 ); flux[i][j][k][3] = (4.0/3.0) * tz3 * (u41k-u41km1); flux[i][j][k][4] = 0.50 * ( 1.0 - C1*C5 ) * tz3 * ( ( pow2(u21k) + pow2(u31k) + pow2(u41k) ) - ( pow2(u21km1) + 
pow2(u31km1) + pow2(u41km1) ) ) + (1.0/6.0) * tz3 * ( pow2(u41k) - pow2(u41km1) ) + C1 * C5 * tz3 * ( u51k - u51km1 ); } #pragma omp parallel for firstprivate(nz ,ist ,jst ,tz1 ,dz1 ,dz2 ,tz3 ,dz3 ,dz4 ,dz5 ,k ,j ,i ) for (k = 1; k <= nz - 2; k++) { rsd[i][j][k][0] = rsd[i][j][k][0] + dz1 * tz1 * ( u[i][j][k-1][0] - 2.0 * u[i][j][k][0] + u[i][j][k+1][0] ); rsd[i][j][k][1] = rsd[i][j][k][1] + tz3 * C3 * C4 * ( flux[i][j][k+1][1] - flux[i][j][k][1] ) + dz2 * tz1 * ( u[i][j][k-1][1] - 2.0 * u[i][j][k][1] + u[i][j][k+1][1] ); rsd[i][j][k][2] = rsd[i][j][k][2] + tz3 * C3 * C4 * ( flux[i][j][k+1][2] - flux[i][j][k][2] ) + dz3 * tz1 * ( u[i][j][k-1][2] - 2.0 * u[i][j][k][2] + u[i][j][k+1][2] ); rsd[i][j][k][3] = rsd[i][j][k][3] + tz3 * C3 * C4 * ( flux[i][j][k+1][3] - flux[i][j][k][3] ) + dz4 * tz1 * ( u[i][j][k-1][3] - 2.0 * u[i][j][k][3] + u[i][j][k+1][3] ); rsd[i][j][k][4] = rsd[i][j][k][4] + tz3 * C3 * C4 * ( flux[i][j][k+1][4] - flux[i][j][k][4] ) + dz5 * tz1 * ( u[i][j][k-1][4] - 2.0 * u[i][j][k][4] + u[i][j][k+1][4] ); } /*-------------------------------------------------------------------- c fourth-order dissipation --------------------------------------------------------------------*/ for (m = 0; m < 5; m++) { rsd[i][j][1][m] = rsd[i][j][1][m] - dssp * ( + 5.0 * u[i][j][1][m] - 4.0 * u[i][j][2][m] + u[i][j][3][m] ); rsd[i][j][2][m] = rsd[i][j][2][m] - dssp * ( - 4.0 * u[i][j][1][m] + 6.0 * u[i][j][2][m] - 4.0 * u[i][j][3][m] + u[i][j][4][m] ); } #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) for (k = 3; k <= nz - 4; k++) { #pragma omp parallel for firstprivate(nz ,ist ,jst ,m ,dssp ,k ,j ,i ) for (m = 0; m < 5; m++) { rsd[i][j][k][m] = rsd[i][j][k][m] - dssp * ( u[i][j][k-2][m] - 4.0 * u[i][j][k-1][m] + 6.0 * u[i][j][k][m] - 4.0 * u[i][j][k+1][m] + u[i][j][k+2][m] ); } } for (m = 0; m < 5; m++) { rsd[i][j][nz-3][m] = rsd[i][j][nz-3][m] - dssp * ( u[i][j][nz-5][m] - 4.0 * u[i][j][nz-4][m] + 6.0 * u[i][j][nz-3][m] - 4.0 * u[i][j][nz-2][m] ); rsd[i][j][nz-2][m] = rsd[i][j][nz-2][m] - dssp * ( u[i][j][nz-4][m] - 4.0 * u[i][j][nz-3][m] + 5.0 * u[i][j][nz-2][m] ); } } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setbv(void) { { /*-------------------------------------------------------------------- c set the boundary values of dependent variables --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c local variables --------------------------------------------------------------------*/ int i, j, k; int iglob, jglob; /*-------------------------------------------------------------------- c set the dependent variable values along the top and bottom faces --------------------------------------------------------------------*/ #pragma omp parallel for for (i = 0; i < nx; i++) { iglob = i; for (j = 0; j < ny; j++) { jglob = j; exact( iglob, jglob, 0, &u[i][j][0][0] ); exact( iglob, jglob, nz-1, &u[i][j][nz-1][0] ); } } /*-------------------------------------------------------------------- c set the dependent variable values along north and south faces --------------------------------------------------------------------*/ #pragma omp parallel for for (i = 0; i < nx; i++) { iglob = i; for (k = 0; k < nz; k++) { exact( iglob, 0, k, &u[i][0][k][0] ); } } #pragma omp parallel for for (i = 0; i < nx; i++) { iglob = i; for (k = 0; k < nz; k++) { exact( iglob, ny0-1, k, 
&u[i][ny-1][k][0] ); } } /*-------------------------------------------------------------------- c set the dependent variable values along east and west faces --------------------------------------------------------------------*/ #pragma omp parallel for for (j = 0; j < ny; j++) { jglob = j; for (k = 0; k < nz; k++) { exact( 0, jglob, k, &u[0][j][k][0] ); } } #pragma omp parallel for for (j = 0; j < ny; j++) { jglob = j; for (k = 0; k < nz; k++) { exact( nx0-1, jglob, k, &u[nx-1][j][k][0] ); } } } } /*-------------------------------------------------------------------- --------------------------------------------------------------------*/ static void setcoeff(void) { /*-------------------------------------------------------------------- c set up coefficients --------------------------------------------------------------------*/ dxi = 1.0 / ( nx0 - 1 ); deta = 1.0 / ( ny0 - 1 ); dzeta = 1.0 / ( nz0 - 1 ); tx1 = 1.0 / ( dxi * dxi ); tx2 = 1.0 / ( 2.0 * dxi ); tx3 = 1.0 / dxi; ty1 = 1.0 / ( deta * deta ); ty2 = 1.0 / ( 2.0 * deta ); ty3 = 1.0 / deta; tz1 = 1.0 / ( dzeta * dzeta ); tz2 = 1.0 / ( 2.0 * dzeta ); tz3 = 1.0 / dzeta; ii1 = 1; ii2 = nx0 - 2; ji1 = 1; ji2 = ny0 - 3; ki1 = 2; ki2 = nz0 - 2; /*-------------------------------------------------------------------- c diffusion coefficients --------------------------------------------------------------------*/ dx1 = 0.75; dx2 = dx1; dx3 = dx1; dx4 = dx1; dx5 = dx1; dy1 = 0.75; dy2 = dy1; dy3 = dy1; dy4 = dy1; dy5 = dy1; dz1 = 1.00; dz2 = dz1; dz3 = dz1; dz4 = dz1; dz5 = dz1; /*-------------------------------------------------------------------- c fourth difference dissipation --------------------------------------------------------------------*/ dssp = ( max (dx1, max(dy1, dz1) ) ) / 4.0; /*-------------------------------------------------------------------- c coefficients of the exact solution to the first pde --------------------------------------------------------------------*/ ce[0][0] = 2.0; ce[0][1] = 0.0; ce[0][2] = 0.0; ce[0][3] = 4.0; ce[0][4] = 5.0; ce[0][5] = 3.0; ce[0][6] = 5.0e-01; ce[0][7] = 2.0e-02; ce[0][8] = 1.0e-02; ce[0][9] = 3.0e-02; ce[0][10] = 5.0e-01; ce[0][11] = 4.0e-01; ce[0][12] = 3.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the second pde --------------------------------------------------------------------*/ ce[1][0] = 1.0; ce[1][1] = 0.0; ce[1][2] = 0.0; ce[1][3] = 0.0; ce[1][4] = 1.0; ce[1][5] = 2.0; ce[1][6] = 3.0; ce[1][7] = 1.0e-02; ce[1][8] = 3.0e-02; ce[1][9] = 2.0e-02; ce[1][10] = 4.0e-01; ce[1][11] = 3.0e-01; ce[1][12] = 5.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the third pde --------------------------------------------------------------------*/ ce[2][0] = 2.0; ce[2][1] = 2.0; ce[2][2] = 0.0; ce[2][3] = 0.0; ce[2][4] = 0.0; ce[2][5] = 2.0; ce[2][6] = 3.0; ce[2][7] = 4.0e-02; ce[2][8] = 3.0e-02; ce[2][9] = 5.0e-02; ce[2][10] = 3.0e-01; ce[2][11] = 5.0e-01; ce[2][12] = 4.0e-01; /*-------------------------------------------------------------------- c coefficients of the exact solution to the fourth pde --------------------------------------------------------------------*/ ce[3][0] = 2.0; ce[3][1] = 2.0; ce[3][2] = 0.0; ce[3][3] = 0.0; ce[3][4] = 0.0; ce[3][5] = 2.0; ce[3][6] = 3.0; ce[3][7] = 3.0e-02; ce[3][8] = 5.0e-02; ce[3][9] = 4.0e-02; ce[3][10] = 2.0e-01; ce[3][11] = 1.0e-01; ce[3][12] = 3.0e-01; 
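/*--------------------------------------------------------------------
c  note: row ce[m][0..12] holds the coefficients of the analytic
c  solution for equation m, evaluated by exact() elsewhere in this
c  benchmark; the 13 entries per row correspond to a constant term
c  plus the first through fourth powers of each of the three
c  coordinates.
--------------------------------------------------------------------*/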
/*--------------------------------------------------------------------
c  coefficients of the exact solution to the fifth pde
--------------------------------------------------------------------*/
  ce[4][0] = 5.0;
  ce[4][1] = 4.0;
  ce[4][2] = 3.0;
  ce[4][3] = 2.0;
  ce[4][4] = 1.0e-01;
  ce[4][5] = 4.0e-01;
  ce[4][6] = 3.0e-01;
  ce[4][7] = 5.0e-02;
  ce[4][8] = 4.0e-02;
  ce[4][9] = 3.0e-02;
  ce[4][10] = 1.0e-01;
  ce[4][11] = 3.0e-01;
  ce[4][12] = 2.0e-01;
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void setiv(void) {
{
/*--------------------------------------------------------------------
c
c  set the initial values of independent variables based on tri-linear
c  interpolation of boundary values in the computational space.
c
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int iglob, jglob;
  double xi, eta, zeta;
  double pxi, peta, pzeta;
  double ue_1jk[5], ue_nx0jk[5], ue_i1k[5],
    ue_iny0k[5], ue_ij1[5], ue_ijnz[5];

/* the boundary-value buffers ue_* and every index or coordinate
   written inside the loop are private, so the threads cannot
   overwrite each other's interpolation data */
#pragma omp parallel for private(i, k, m, iglob, jglob, xi, eta, zeta, pxi, peta, pzeta, ue_1jk, ue_nx0jk, ue_i1k, ue_iny0k, ue_ij1, ue_ijnz)
  for (j = 0; j < ny; j++) {
    jglob = j;
    for (k = 1; k < nz - 1; k++) {
      zeta = ((double)k) / (nz-1);
      if (jglob != 0 && jglob != ny0-1) {
        eta = ( (double) (jglob) ) / (ny0-1);
        for (i = 0; i < nx; i++) {
          iglob = i;
          if(iglob != 0 && iglob != nx0-1) {
            xi = ( (double) (iglob) ) / (nx0-1);
            exact (0,jglob,k,ue_1jk);
            exact (nx0-1,jglob,k,ue_nx0jk);
            exact (iglob,0,k,ue_i1k);
            exact (iglob,ny0-1,k,ue_iny0k);
            exact (iglob,jglob,0,ue_ij1);
            exact (iglob,jglob,nz-1,ue_ijnz);
            for (m = 0; m < 5; m++) {
              pxi =   ( 1.0 - xi ) * ue_1jk[m] + xi * ue_nx0jk[m];
              peta =  ( 1.0 - eta ) * ue_i1k[m] + eta * ue_iny0k[m];
              pzeta = ( 1.0 - zeta ) * ue_ij1[m] + zeta * ue_ijnz[m];
              u[i][j][k][m] = pxi + peta + pzeta
                - pxi * peta - peta * pzeta - pzeta * pxi
                + pxi * peta * pzeta;
            }
          }
        }
      }
    }
  }
}
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void ssor(void) {

/*--------------------------------------------------------------------
c  to perform pseudo-time stepping SSOR iterations
c  for the five nonlinear PDEs.
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  local variables
--------------------------------------------------------------------*/
  int i, j, k, m;
  int istep;
  double tmp;
  double delunm[5], tv[ISIZ1][ISIZ2][5];

/*--------------------------------------------------------------------
c  begin pseudo-time stepping iterations
--------------------------------------------------------------------*/
  tmp = 1.0 / ( omega * ( 2.0 - omega ) );

/*--------------------------------------------------------------------
c  initialize a,b,c,d to zero (guarantees that page tables have been
c  formed, if applicable on given architecture, before timestepping).
--------------------------------------------------------------------*/ { #pragma omp parallel for firstprivate(j ,k ,m ,i ) for (i = 0; i < ISIZ1; i++) { #pragma omp parallel for private(istep ,i ,j ,k ,m ) for (j = 0; j < ISIZ2; j++) { #pragma omp parallel for firstprivate(j ,k ,m ,i ) for (k = 0; k < 5; k++) { #pragma omp parallel for firstprivate(j ,k ,m ,i ) for (m = 0; m < 5; m++) { a[i][j][k][m] = 0.0; b[i][j][k][m] = 0.0; c[i][j][k][m] = 0.0; d[i][j][k][m] = 0.0; } } } } } /*-------------------------------------------------------------------- c compute the steady-state residuals --------------------------------------------------------------------*/ rhs(); /*-------------------------------------------------------------------- c compute the L2 norms of newton iteration residuals --------------------------------------------------------------------*/ l2norm( nx0, ny0, nz0, ist, iend, jst, jend, rsd, rsdnm ); timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c the timestep loop --------------------------------------------------------------------*/ for (istep = 1; istep <= itmax; istep++) { if (istep%20 == 0 || istep == itmax || istep == 1) { printf(" Time step %4d\n", istep); } { /*-------------------------------------------------------------------- c perform SSOR iteration --------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) for (i = ist; i <= iend; i++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) for (j = jst; j <= jend; j++) { #pragma omp parallel for private(istep ,i ,j ,k ,m ) for (k = 1; k <= nz - 2; k++) { #pragma omp parallel for firstprivate(iend ,ist ,j ,k ,m ,dt ,nz ,jst ,jend ,i ,istep ) for (m = 0; m < 5; m++) { rsd[i][j][k][m] = dt * rsd[i][j][k][m]; } } } } for (k = 1; k <= nz - 2; k++) { /*-------------------------------------------------------------------- c form the lower triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacld(k); /*-------------------------------------------------------------------- c perform the lower triangular solution --------------------------------------------------------------------*/ blts(nx, ny, nz, k, omega, rsd, a, b, c, d, ist, iend, jst, jend, nx0, ny0 ); } for (k = nz - 2; k >= 1; k--) { /*-------------------------------------------------------------------- c form the strictly upper triangular part of the jacobian matrix --------------------------------------------------------------------*/ jacu(k); /*-------------------------------------------------------------------- c perform the upper triangular solution --------------------------------------------------------------------*/ buts(nx, ny, nz, k, omega, rsd, tv, d, a, b, c, ist, iend, jst, jend, nx0, ny0 ); } /*-------------------------------------------------------------------- c update the variables --------------------------------------------------------------------*/ for (i = ist; i <= iend; i++) { for (j = jst; j <= jend; j++) { for (k = 1; k <= nz-2; k++) { for (m = 0; m < 5; m++) { u[i][j][k][m] = u[i][j][k][m] + tmp * rsd[i][j][k][m]; } } } } } /* end parallel */ /*-------------------------------------------------------------------- c compute the max-norms of newton iteration corrections --------------------------------------------------------------------*/ if ( istep % inorm == 0 ) { l2norm( nx0, ny0, nz0, ist, iend, 
              jst, jend, rsd, delunm );
    }

/*--------------------------------------------------------------------
c  compute the steady-state residuals
--------------------------------------------------------------------*/
    rhs();

/*--------------------------------------------------------------------
c  compute the l2-norms of newton iteration residuals
--------------------------------------------------------------------*/
    if ( ( istep % inorm == 0 ) ||
         ( istep == itmax ) ) {
      l2norm( nx0, ny0, nz0,
              ist, iend, jst, jend,
              rsd, rsdnm );
    }

/*--------------------------------------------------------------------
c  check the newton-iteration residuals against the tolerance levels
--------------------------------------------------------------------*/
    if ( ( rsdnm[0] < tolrsd[0] ) &&
         ( rsdnm[1] < tolrsd[1] ) &&
         ( rsdnm[2] < tolrsd[2] ) &&
         ( rsdnm[3] < tolrsd[3] ) &&
         ( rsdnm[4] < tolrsd[4] ) ) {
      /* steady state reached within the tolerances:
         leave the time-step loop */
      break;
    }
  }

  timer_stop(1);
  maxtime = timer_read(1);
}

/*--------------------------------------------------------------------
--------------------------------------------------------------------*/

static void verify(double xcr[5], double xce[5], double xci,
                   char *class, boolean *verified) {

/*--------------------------------------------------------------------
c  verification routine
--------------------------------------------------------------------*/
  double xcrref[5], xceref[5], xciref,
    xcrdif[5], xcedif[5], xcidif,
    epsilon, dtref;
  int m;

/*--------------------------------------------------------------------
c  tolerance level
--------------------------------------------------------------------*/
  epsilon = 1.0e-08;

  *class = 'U';
  *verified = TRUE;

  for (m = 0; m < 5; m++) {
    xcrref[m] = 1.0;
    xceref[m] = 1.0;
  }
  xciref = 1.0;

  if ( nx0 == 12 && ny0 == 12 && nz0 == 12 && itmax == 50) {
    *class = 'S';
    dtref = 5.0e-1;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (12X12X12) grid,
c  after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xcrref[0] = 1.6196343210976702e-02;
    xcrref[1] = 2.1976745164821318e-03;
    xcrref[2] = 1.5179927653399185e-03;
    xcrref[3] = 1.5029584435994323e-03;
    xcrref[4] = 3.4264073155896461e-02;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of solution error, for the (12X12X12)
c  grid, after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xceref[0] = 6.4223319957960924e-04;
    xceref[1] = 8.4144342047347926e-05;
    xceref[2] = 5.8588269616485186e-05;
    xceref[3] = 5.8474222595157350e-05;
    xceref[4] = 1.3103347914111294e-03;

/*--------------------------------------------------------------------
c  Reference value of surface integral, for the (12X12X12) grid,
c  after 50 time steps, with DT = 5.0d-01
--------------------------------------------------------------------*/
    xciref = 7.8418928865937083;

  } else if ( nx0 == 33 && ny0 == 33 && nz0 == 33 && itmax == 300) {
    *class = 'W';   /* SPEC95fp size */
    dtref = 1.5e-3;

/*--------------------------------------------------------------------
c  Reference values of RMS-norms of residual, for the (33x33x33) grid,
c  after 300 time steps, with DT = 1.5d-3
--------------------------------------------------------------------*/
    xcrref[0] = 0.1236511638192e+02;
    xcrref[1] = 0.1317228477799e+01;
    xcrref[2] = 0.2550120713095e+01;
    xcrref[3] = 0.2326187750252e+01;
    xcrref[4] = 0.2826799444189e+02;
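/*--------------------------------------------------------------------
c  note: each verification class fixes the grid size, the iteration
c  count and a reference time step; the residual norms, solution-error
c  norms and the surface integral computed by the run are compared
c  against these tables as relative differences using the epsilon
c  defined above.
--------------------------------------------------------------------*/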
/*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (33X33X33) grid, --------------------------------------------------------------------*/ xceref[0] = 0.4867877144216; xceref[1] = 0.5064652880982e-01; xceref[2] = 0.9281818101960e-01; xceref[3] = 0.8570126542733e-01; xceref[4] = 0.1084277417792e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (33X33X33) grid, c after 300 time steps, with DT = 1.5d-3 --------------------------------------------------------------------*/ xciref = 0.1161399311023e+02; } else if ( nx0 == 64 && ny0 == 64 && nz0 == 64 && itmax == 250) { *class = 'A'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 7.7902107606689367e+02; xcrref[1] = 6.3402765259692870e+01; xcrref[2] = 1.9499249727292479e+02; xcrref[3] = 1.7845301160418537e+02; xcrref[4] = 1.8384760349464247e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.9964085685471943e+01; xceref[1] = 2.8194576365003349; xceref[2] = 7.3473412698774742; xceref[3] = 6.7139225687777051; xceref[4] = 7.0715315688392578e+01; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (64X64X64) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 2.6030925604886277e+01; } else if ( nx0 == 102 && ny0 == 102 && nz0 == 102 && itmax == 250) { *class = 'B'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 3.5532672969982736e+03; xcrref[1] = 2.6214750795310692e+02; xcrref[2] = 8.8333721850952190e+02; xcrref[3] = 7.7812774739425265e+02; xcrref[4] = 7.3087969592545314e+03; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (102X102X102) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 1.1401176380212709e+02; xceref[1] = 8.1098963655421574; xceref[2] = 2.8480597317698308e+01; xceref[3] = 2.5905394567832939e+01; xceref[4] = 2.6054907504857413e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (102X102X102) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 4.7887162703308227e+01; } else if ( nx0 == 162 && ny0 == 162 && nz0 == 162 && itmax == 250) { *class = 'C'; dtref = 2.0e+0; /*-------------------------------------------------------------------- c Reference values of RMS-norms of residual, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xcrref[0] = 
1.03766980323537846e+04; xcrref[1] = 8.92212458801008552e+02; xcrref[2] = 2.56238814582660871e+03; xcrref[3] = 2.19194343857831427e+03; xcrref[4] = 1.78078057261061185e+04; /*-------------------------------------------------------------------- c Reference values of RMS-norms of solution error, for the (162X162X162) c grid, after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xceref[0] = 2.15986399716949279e+02; xceref[1] = 1.55789559239863600e+01; xceref[2] = 5.41318863077207766e+01; xceref[3] = 4.82262643154045421e+01; xceref[4] = 4.55902910043250358e+02; /*-------------------------------------------------------------------- c Reference value of surface integral, for the (162X162X162) grid, c after 250 time steps, with DT = 2.0d+0.0 --------------------------------------------------------------------*/ xciref = 6.66404553572181300e+01; } else { *verified = FALSE; } /*-------------------------------------------------------------------- c verification test for residuals if gridsize is either 12X12X12 or c 64X64X64 or 102X102X102 or 162X162X162 --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Compute the difference of solution values and the known reference values. --------------------------------------------------------------------*/ #pragma omp parallel for firstprivate(xcr ,xce ,m ) for (m = 0; m < 5; m++) { xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]); xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]); } xcidif = fabs((xci - xciref)/xciref); /*-------------------------------------------------------------------- c Output the comparison of computed results to known cases. --------------------------------------------------------------------*/ if (*class != 'U') { printf("\n Verification being performed for class %1c\n", *class); printf(" Accuracy setting for epsilon = %20.13e\n", epsilon); if (fabs(dt-dtref) > epsilon) { *verified = FALSE; *class = 'U'; printf(" DT does not match the reference value of %15.8e\n", dtref); } } else { printf(" Unknown class\n"); } if (*class != 'U') { printf(" Comparison of RMS-norms of residual\n"); } else { printf(" RMS-norms of residual\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d %20.13e\n", m, xcr[m]); } else if (xcrdif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n", m,xcr[m],xcrref[m],xcrdif[m]); } } if (*class != 'U') { printf(" Comparison of RMS-norms of solution error\n"); } else { printf(" RMS-norms of solution error\n"); } for (m = 0; m < 5; m++) { if (*class == 'U') { printf(" %2d %20.13e\n", m, xce[m]); } else if (xcedif[m] > epsilon) { *verified = FALSE; printf(" FAILURE: %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]); } else { printf(" %2d %20.13e%20.13e%20.13e\n", m,xce[m],xceref[m],xcedif[m]); } } if (*class != 'U') { printf(" Comparison of surface integral\n"); } else { printf(" Surface integral\n"); } if (*class == 'U') { printf(" %20.13e\n", xci); } else if (xcidif > epsilon) { *verified = FALSE; printf(" FAILURE: %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); } else { printf(" %20.13e%20.13e%20.13e\n", xci, xciref, xcidif); } if (*class == 'U') { printf(" No reference values provided\n"); printf(" No verification performed\n"); } else if (*verified) { printf(" Verification Successful\n"); } else { printf(" Verification failed\n"); } }
tree.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/dataset.h> #include <LightGBM/meta.h> #include <string> #include <map> #include <memory> #include <unordered_map> #include <vector> namespace LightGBM { #define kCategoricalMask (1) #define kDefaultLeftMask (2) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Constructor, from a string * \param str Model string * \param used_len used count of str */ Tree(const char* str, size_t* used_len); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \param missing_type missing type * \param default_left default direction for missing value * \return The index of new leaf. */ int Split(int leaf, int feature, int real_feature, uint32_t threshold_bin, double threshold_double, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type, bool default_left); /*! * \brief Performing a split on tree leaves, with categorical feature * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param real_feature Index of feature, the original index on data * \param threshold_bin Threshold(bin) of split, use bitset to represent * \param num_threshold_bin size of threshold_bin * \param threshold Thresholds of real feature value, use bitset to represent * \param num_threshold size of threshold * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param left_weight Weight of left child * \param right_weight Weight of right child * \param gain Split gain * \return The index of new leaf. */ int SplitCategorical(int leaf, int feature, int real_feature, const uint32_t* threshold_bin, int num_threshold_bin, const uint32_t* threshold, int num_threshold, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain, MissingType missing_type); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline double PredictByMap(const std::unordered_map<int, double>& feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; inline int PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const; inline void PredictContrib(const double* feature_values, int num_features, double* output); /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! \brief Get the number of data points that fall at or below this node*/ inline int data_count(int node) const { return node >= 0 ? internal_count_[node] : leaf_count_[~node]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the training process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; } shrinkage_ *= rate; } inline double shrinkage() const { return shrinkage_; } inline void AddBias(double val) { #pragma omp parallel for schedule(static, 1024) if (num_leaves_ >= 2048) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] = val + leaf_value_[i]; } // force to 1.0 shrinkage_ = 1.0f; } inline void AsConstantTree(double val) { num_leaves_ = 1; shrinkage_ = 1.0f; leaf_value_[0] = val; } /*! \brief Serialize this object to string*/ std::string ToString() const; /*! \brief Serialize this object to json*/ std::string ToJSON() const; /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool predict_leaf_index) const; inline static bool IsZero(double fval) { if (fval > -kZeroThreshold && fval <= kZeroThreshold) { return true; } else { return false; } } inline static bool GetDecisionType(int8_t decision_type, int8_t mask) { return (decision_type & mask) > 0; } inline static void SetDecisionType(int8_t* decision_type, bool input, int8_t mask) { if (input) { (*decision_type) |= mask; } else { (*decision_type) &= (127 - mask); } } inline static int8_t GetMissingType(int8_t decision_type) { return (decision_type >> 2) & 3; } inline static void SetMissingType(int8_t* decision_type, int8_t input) { (*decision_type) &= 3; (*decision_type) |= (input << 2); } void RecomputeMaxDepth(); private: std::string NumericalDecisionIfElse(int node) const; std::string CategoricalDecisionIfElse(int node) const; inline int NumericalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if (std::isnan(fval)) { if (missing_type != 2) { fval = 0.0f; } } if ((missing_type == 1 && IsZero(fval)) || (missing_type == 2 && std::isnan(fval))) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int NumericalDecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { uint8_t missing_type = GetMissingType(decision_type_[node]); if ((missing_type == 1 && fval == default_bin) || (missing_type == 2 && fval == max_bin)) { if (GetDecisionType(decision_type_[node], kDefaultLeftMask)) { return left_child_[node]; } else { return right_child_[node]; } } if (fval <= threshold_in_bin_[node]) { return left_child_[node]; } else { return right_child_[node]; } } inline int CategoricalDecision(double fval, int node) const { uint8_t missing_type = GetMissingType(decision_type_[node]); int int_fval = static_cast<int>(fval); if (int_fval < 0) { return right_child_[node];; } else if (std::isnan(fval)) { // NaN is always in the right if (missing_type == 2) { return right_child_[node]; } int_fval = 0; } int cat_idx = static_cast<int>(threshold_[node]); if (Common::FindInBitset(cat_threshold_.data() + cat_boundaries_[cat_idx], cat_boundaries_[cat_idx + 1] - cat_boundaries_[cat_idx], int_fval)) { return left_child_[node]; } return right_child_[node]; } inline int CategoricalDecisionInner(uint32_t fval, int node) const { int cat_idx = static_cast<int>(threshold_in_bin_[node]); if (Common::FindInBitset(cat_threshold_inner_.data() + cat_boundaries_inner_[cat_idx], cat_boundaries_inner_[cat_idx + 1] - cat_boundaries_inner_[cat_idx], fval)) { return left_child_[node]; } return right_child_[node]; } inline int Decision(double fval, int node) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecision(fval, node); } else { return NumericalDecision(fval, node); } } inline int DecisionInner(uint32_t fval, int node, uint32_t default_bin, uint32_t max_bin) const { if (GetDecisionType(decision_type_[node], kCategoricalMask)) { return CategoricalDecisionInner(fval, node); } else { return NumericalDecisionInner(fval, node, default_bin, max_bin); } } inline void Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain); /*! 
* \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; inline int GetLeafByMap(const std::unordered_map<int, double>& feature_values) const; /*! \brief Serialize one node to json*/ std::string NodeToJSON(int index) const; /*! \brief Serialize one node to if-else statement*/ std::string NodeToIfElse(int index, bool predict_leaf_index) const; std::string NodeToIfElseByMap(int index, bool predict_leaf_index) const; double ExpectedValue() const; /*! \brief This is used fill in leaf_depth_ after reloading a model*/ inline void RecomputeLeafDepths(int node = 0, int depth = 0); /*! * \brief Used by TreeSHAP for data we keep about our decision path */ struct PathElement { int feature_index; double zero_fraction; double one_fraction; // note that pweight is included for convenience and is not tied with the other attributes, // the pweight of the i'th path element is the permutation weight of paths with i-1 ones in them double pweight; PathElement() {} PathElement(int i, double z, double o, double w) : feature_index(i), zero_fraction(z), one_fraction(o), pweight(w) {} }; /*! \brief Polynomial time algorithm for SHAP values (arXiv:1706.06060)*/ void TreeSHAP(const double *feature_values, double *phi, int node, int unique_depth, PathElement *parent_unique_path, double parent_zero_fraction, double parent_one_fraction, int parent_feature_index) const; /*! \brief Extend our decision path with a fraction of one and zero extensions for TreeSHAP*/ static void ExtendPath(PathElement *unique_path, int unique_depth, double zero_fraction, double one_fraction, int feature_index); /*! \brief Undo a previous extension of the decision path for TreeSHAP*/ static void UnwindPath(PathElement *unique_path, int unique_depth, int path_index); /*! determine what the total permutation weight would be if we unwound a previous extension in the decision path*/ static double UnwoundPathSum(const PathElement *unique_path, int unique_depth, int path_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current leaves*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; int num_cat_; std::vector<int> cat_boundaries_inner_; std::vector<uint32_t> cat_threshold_inner_; std::vector<int> cat_boundaries_; std::vector<uint32_t> cat_threshold_; /*! \brief Store the information for categorical feature handle and missing value handle. */ std::vector<int8_t> decision_type_; /*! \brief A non-leaf node's split gain */ std::vector<float> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief weight of leaves */ std::vector<double> leaf_weight_; /*! \brief DataCount of leaves */ std::vector<int> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! 
\brief weight of non-leaf nodes */ std::vector<double> internal_weight_; /*! \brief DataCount of non-leaf nodes */ std::vector<int> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; int max_depth_; }; inline void Tree::Split(int leaf, int feature, int real_feature, double left_value, double right_value, int left_cnt, int right_cnt, double left_weight, double right_weight, float gain) { int new_node_idx = num_leaves_ - 1; // update parent info int parent = leaf_parent_[leaf]; if (parent >= 0) { // if cur node is left child if (left_child_[parent] == ~leaf) { left_child_[parent] = new_node_idx; } else { right_child_[parent] = new_node_idx; } } // add new node split_feature_inner_[new_node_idx] = feature; split_feature_[new_node_idx] = real_feature; split_gain_[new_node_idx] = gain; // add two new leaves left_child_[new_node_idx] = ~leaf; right_child_[new_node_idx] = ~num_leaves_; // update new leaves leaf_parent_[leaf] = new_node_idx; leaf_parent_[num_leaves_] = new_node_idx; // save current leaf value to internal node before change internal_weight_[new_node_idx] = leaf_weight_[leaf]; internal_value_[new_node_idx] = leaf_value_[leaf]; internal_count_[new_node_idx] = left_cnt + right_cnt; leaf_value_[leaf] = std::isnan(left_value) ? 0.0f : left_value; leaf_weight_[leaf] = left_weight; leaf_count_[leaf] = left_cnt; leaf_value_[num_leaves_] = std::isnan(right_value) ? 0.0f : right_value; leaf_weight_[num_leaves_] = right_weight; leaf_count_[num_leaves_] = right_cnt; // update leaf depth leaf_depth_[num_leaves_] = leaf_depth_[leaf] + 1; leaf_depth_[leaf]++; } inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline double Tree::PredictByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return LeafOutput(leaf); } else { return leaf_value_[0]; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::PredictLeafIndexByMap(const std::unordered_map<int, double>& feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeafByMap(feature_values); return leaf; } else { return 0; } } inline void Tree::PredictContrib(const double* feature_values, int num_features, double* output) { output[num_features] += ExpectedValue(); // Run the recursion with preallocated space for the unique path data if (num_leaves_ > 1) { CHECK(max_depth_ >= 0); const int max_path_len = max_depth_ + 1; std::vector<PathElement> unique_path_data(max_path_len*(max_path_len + 1) / 2); TreeSHAP(feature_values, output, 0, 0, unique_path_data.data(), 1, 1, -1); } } inline void Tree::RecomputeLeafDepths(int node, int depth) { if (node == 0) leaf_depth_.resize(num_leaves()); if (node < 0) { leaf_depth_[~node] = depth; } else { RecomputeLeafDepths(left_child_[node], depth + 1); RecomputeLeafDepths(right_child_[node], depth + 1); } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (num_cat_ > 0) { while (node >= 0) { node = Decision(feature_values[split_feature_[node]], node); } } else { while (node >= 0) { node = NumericalDecision(feature_values[split_feature_[node]], node); } } return ~node; } inline int Tree::GetLeafByMap(const std::unordered_map<int, double>& feature_values) const { int node = 0; if (num_cat_ > 0) 
{ while (node >= 0) { node = Decision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } else { while (node >= 0) { node = NumericalDecision(feature_values.count(split_feature_[node]) > 0 ? feature_values.at(split_feature_[node]) : 0.0f, node); } } return ~node; } } // namespace LightGBM #endif // LIGHTGBM_TREE_H_
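The two illustrative sketches below are not part of LightGBM; every name in them is a local stand-in. This first one mirrors the decision_type bit layout used by Tree above: bit 0 marks a categorical split (kCategoricalMask), bit 1 the default direction for missing values (kDefaultLeftMask), and bits 2-3 hold the missing type (2 corresponding to the NaN handling in NumericalDecision).

/* sketch_decision_type.c -- illustrative only; re-implements the Tree helpers above */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define kCategoricalMask (1)
#define kDefaultLeftMask (2)

static int GetDecisionType(int8_t decision_type, int8_t mask) {
  return (decision_type & mask) > 0;
}

static void SetDecisionType(int8_t *decision_type, int input, int8_t mask) {
  if (input) {
    *decision_type |= mask;
  } else {
    *decision_type &= (int8_t)(127 - mask); /* clear the bit, as in Tree */
  }
}

static int8_t GetMissingType(int8_t decision_type) {
  return (decision_type >> 2) & 3;
}

static void SetMissingType(int8_t *decision_type, int8_t input) {
  *decision_type &= 3;                     /* keep the two flag bits */
  *decision_type |= (int8_t)(input << 2);  /* store missing type in bits 2-3 */
}

int main(void) {
  int8_t dt = 0;
  SetDecisionType(&dt, 1, kDefaultLeftMask); /* missing values go left */
  SetMissingType(&dt, 2);                    /* 2 = the NaN missing type above */
  assert(!GetDecisionType(dt, kCategoricalMask));
  assert(GetDecisionType(dt, kDefaultLeftMask));
  assert(GetMissingType(dt) == 2);
  printf("decision_type = 0x%02x\n", (unsigned)(uint8_t)dt); /* prints 0x0a */
  return 0;
}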
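A second minimal sketch of the leaf-encoding convention GetLeaf relies on: a child index >= 0 is an internal node, while a negative child stores a leaf index as its bitwise complement, so traversal loops until node < 0 and recovers the leaf with ~node. The tiny one-split tree here is hypothetical.

/* leaf_encoding_sketch.c -- illustrative only; mirrors GetLeaf's ~node trick */
#include <stdio.h>

/* One internal node (index 0) splitting on x <= 0.5; negative children
 * encode leaves: child = ~leaf_index, recovered later with ~child. */
static const int left_child[1] = {~0};  /* leaf 0 */
static const int right_child[1] = {~1}; /* leaf 1 */
static const double threshold[1] = {0.5};
static const double leaf_value[2] = {-1.0, 1.0};

static double PredictSketch(double x) {
  int node = 0;
  while (node >= 0) { /* stop as soon as we step onto an encoded leaf */
    node = (x <= threshold[node]) ? left_child[node] : right_child[node];
  }
  return leaf_value[~node]; /* ~node maps the encoded leaf back to its index */
}

int main(void) {
  printf("%g %g\n", PredictSketch(0.2), PredictSketch(0.9)); /* -1 1 */
  return 0;
}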
evolve.c
/** @file evolve.c @brief This file contains all the core VPLANET integration routines including the timestepping algorithm and the Runge-Kutta integration scheme. @author Rory Barnes ([RoryBarnes](https://github.com/RoryBarnes/)) @date May 2014 */ #define NUM_THREADS 4 #include "vplanet.h" void PropsAuxGeneral(BODY *body, CONTROL *control) { /* Recompute the mean motion, necessary for most modules */ int iBody; // Dummy counting variable for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { if (iBody != 0 && body[iBody].bBinary == 0) { body[iBody].dMeanMotion = fdSemiToMeanMotion( body[iBody].dSemi, (body[0].dMass + body[iBody].dMass)); } } } void PropertiesAuxiliary(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update) { /* Evaluate single and multi-module auxiliary functions to update parameters * of interest such as mean motion. */ int iBody, iModule; // Dummy counter variables PropsAuxGeneral(body, control); /* Get properties from each module */ for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { // Uni-module properties for (iModule = 0; iModule < control->Evolve.iNumModules[iBody]; iModule++) { control->fnPropsAux[iBody][iModule](body, &control->Evolve, &control->Io, update, iBody); } // Multi-module properties for (iModule = 0; iModule < control->iNumMultiProps[iBody]; iModule++) { control->fnPropsAuxMulti[iBody][iModule](body, &control->Evolve, &control->Io, update, iBody); } } } void CalculateDerivatives(BODY *body, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate, int iNumBodies) { int iBody, iVar, iEqn; for (iBody = 0; iBody < iNumBodies; iBody++) { for (iVar = 0; iVar < update[iBody].iNumVars; iVar++) { update[iBody].daDeriv[iVar] = 0; for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); update[iBody].daDeriv[iVar] += update[iBody].daDerivProc[iVar][iEqn]; } } } } void CheckProgress(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update) { int iBody, jBody; if (control->Io.iVerbose >= VERBPROG && !control->Io.bMutualIncMessage && control->Io.dMaxMutualInc > 0) { // If made it here, more than 1 body must be present if (body[1].bSpiNBody) { // Calculate orbital elements for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { cart2osc(body, iBody); } } // Skip central body for (iBody = 1; iBody < control->Evolve.iNumBodies; iBody++) { for (jBody = iBody + 1; jBody < control->Evolve.iNumBodies; jBody++) { // 1 to check progress, not halt if (fbCheckMaxMutualInc(body, &control->Evolve, control->Halt, &control->Io, iBody, jBody, 1)) { /* if (control->Io.iVerbose >= VERBPROG) { printf("WARNING: Mutual inclination of %s and %s exceeds ", body[iBody].cName,body[jBody].cName); fprintd(stdout,control->Io.dMaxMutualInc,control->Io.iSciNot, control->Io.iDigits); printf(" at t = %.2e years.\n",control->Evolve.dTime); } */ control->Io.bMutualIncMessage = 1; } } } } } /* * Integration Control */ double AssignDt(double dMin, double dNextOutput, double dEta) { /* Compute the next timestep, dt, making sure it's not larger than the output * cadence */ dMin = dEta * dMin; if (dNextOutput < dMin) { dMin = dNextOutput; } return dMin; } double fdGetTimeStep(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate) { /* Fills the Update arrays with the derivatives * or new values. It returns the smallest timescale for use * in variable timestepping. 
Uses either a 4th order Runge-Kutta integrator or * an Euler step. */ int iBody, iVar, iEqn; // Dummy counting variables EVOLVE integr; // Dummy EVOLVE struct so we don't have to dereference control a lot double dVarNow, dMinNow, dMin = dHUGE, dVarTotal; // Intermediate storage variables integr = control->Evolve; dMin = dHUGE; for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { if (update[iBody].iNumVars > 0) { for (iVar = 0; iVar < update[iBody].iNumVars; iVar++) { // The parameter does not require a derivative, but is calculated // explicitly as a function of age. if (update[iBody].iaType[iVar][0] == 0) { dVarNow = *update[iBody].pdVar[iVar]; for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); } if (control->Evolve.bFirstStep) { dMin = integr.dTimeStep; control->Evolve.bFirstStep = 0; } else { /* Sum over all equations giving new value of the variable */ dVarTotal = 0.; for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { dVarTotal += update[iBody].daDerivProc[iVar][iEqn]; } // Prevent division by zero if (dVarNow != dVarTotal) { dMinNow = fabs(dVarNow / ((dVarNow - dVarTotal) / integr.dTimeStep)); if (dMinNow < dMin) { dMin = dMinNow; } } } /* Equations that are integrated in the matrix but are NOT allowed to dictate timestepping. These are derived quantities, like lost energy, that must be integrated as primary variables to keep track of them properly, i.e. lost energy depends on changing radii, which are integrated. But in this case, since they are derived quantities, they should NOT participate in timestep selection - dflemin3 */ } else if (update[iBody].iaType[iVar][0] == 5) { for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); } /* Integration for binary, where parameters can be computed via derivatives, or as an explicit function of age */ } else if (update[iBody].iaType[iVar][0] == 10) { /* Equations not in matrix, computing things as an explicit function of time, so set dMin to the time until the next output */ dMinNow = control->Io.dNextOutput; if (dMinNow < dMin) { dMin = dMinNow; } /* The parameter does not require a derivative, but is calculated explicitly as a function of age and is a sinusoidal quantity (e.g. 
h,k,p,q in DistOrb) */ } else if (update[iBody].iaType[iVar][0] == 3) { dVarNow = *update[iBody].pdVar[iVar]; for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); } if (control->Evolve.bFirstStep) { dMin = integr.dTimeStep; control->Evolve.bFirstStep = 0; } else { /* Sum over all equations giving new value of the variable */ dVarTotal = 0.; for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { dVarTotal += update[iBody].daDerivProc[iVar][iEqn]; } // Prevent division by zero if (dVarNow != dVarTotal) { dMinNow = fabs(1.0 / ((dVarNow - dVarTotal) / integr.dTimeStep)); if (dMinNow < dMin) { dMin = dMinNow; } } } /* The parameter is a "polar/sinusoidal quantity" and controlled by a time derivative */ } else { for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { if (update[iBody].iaType[iVar][iEqn] == 2) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); if (update[iBody].daDerivProc[iVar][iEqn] != 0) { /* ?Obl require special treatment because they can overconstrain obliquity and PrecA */ if (iVar == update[iBody].iXobl || iVar == update[iBody].iYobl || iVar == update[iBody].iZobl) { if (body[iBody].dObliquity != 0) { dMinNow = fabs(sin(body[iBody].dObliquity) / update[iBody].daDerivProc[iVar][iEqn]); } else { // Obliquity is 0, so its evolution shouldn't impact // the timestep dMinNow = dHUGE; } } else if (iVar == update[iBody].iHecc || iVar == update[iBody].iKecc) { if (body[iBody].dEcc != 0) { dMinNow = fabs(body[iBody].dEcc / update[iBody].daDerivProc[iVar][iEqn]); } else { // Eccentricity is 0, so its evolution shouldn't // impact the timestep dMinNow = dHUGE; } } else { dMinNow = fabs(1.0 / update[iBody].daDerivProc[iVar][iEqn]); } if (dMinNow < dMin) { dMin = dMinNow; } } // enforce a minimum step size for ice sheets, otherwise dDt -> 0 // real fast } else if (update[iBody].iaType[iVar][iEqn] == 9) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) { dMinNow = fabs((*(update[iBody].pdVar[iVar])) / update[iBody].daDerivProc[iVar][iEqn]); if (dMinNow < dMin) { if (dMinNow < control->Halt[iBody].iMinIceDt * (2 * PI / body[iBody].dMeanMotion) / control->Evolve.dEta) { dMin = control->Halt[iBody].iMinIceDt * (2 * PI / body[iBody].dMeanMotion) / control->Evolve.dEta; } else { dMin = dMinNow; } } } /* SpiNBody timestep: As x,y,z can cross over 0, the usual x/(dx/dt) timestep doesn't work. This version compares the orbital radius to velocity. Probably room for improvement here. 
*/ } else if (update[iBody].iaType[iVar][iEqn] == 7) { if ((control->Evolve.bSpiNBodyDistOrb == 0) || (control->Evolve.bUsingSpiNBody == 1)) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); dMinNow = sqrt((body[iBody].dPositionX * body[iBody].dPositionX + body[iBody].dPositionY * body[iBody].dPositionY + body[iBody].dPositionZ * body[iBody].dPositionZ) / (body[iBody].dVelX * body[iBody].dVelX + body[iBody].dVelY * body[iBody].dVelY + body[iBody].dVelZ * body[iBody].dVelZ)); if (dMinNow < dMin) { dMin = dMinNow; } } } else { // The parameter is controlled by a time derivative update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); if (!bFloatComparison(update[iBody].daDerivProc[iVar][iEqn], 0.0) && !bFloatComparison(*(update[iBody].pdVar[iVar]), 0.0)) { dMinNow = fabs((*(update[iBody].pdVar[iVar])) / update[iBody].daDerivProc[iVar][iEqn]); if (dMinNow < dMin) { dMin = dMinNow; } } } } // for loop } // else polar/sinusoidal } // for iNumVars } // if (update[iBody].iNumVars > 0) } // for loop iNumBodies return dMin; } void fdGetUpdateInfo(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate) { /* Fills the Update arrays with the derivatives * or new values. */ int iBody, iVar, iEqn, iNumBodies, iNumVars, iNumEqns; // Dummy counting variables iNumBodies = control->Evolve.iNumBodies; for (iBody = 0; iBody < iNumBodies; iBody++) { if (update[iBody].iNumVars > 0) { iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn = 0; iEqn < iNumEqns; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn]( body, system, update[iBody].iaBody[iVar][iEqn]); } } } } } void EulerStep(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate, double *dDt, int iDir) { /* Compute and apply an Euler update step to a given parameter (x = dx/dt * dt) */ int iBody, iVar, iEqn; double dFoo; /* Adjust dt? */ if (control->Evolve.bVarDt) { /* dDt is the dynamical timescale */ *dDt = fdGetTimeStep(body, control, system, update, fnUpdate); *dDt = AssignDt(*dDt, (control->Io.dNextOutput - control->Evolve.dTime), control->Evolve.dEta); } for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { for (iVar = 0; iVar < update[iBody].iNumVars; iVar++) { for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { if (update[iBody].iaType[iVar][iEqn] == 0) { *(update[iBody].pdVar[iVar]) = update[iBody].daDerivProc[iVar][iEqn]; } else { /* Update the parameter in the BODY struct! Be careful! */ *(update[iBody].pdVar[iVar]) += iDir * update[iBody].daDerivProc[iVar][iEqn] * (*dDt); } } } } } void RungeKutta4Step(BODY *body, CONTROL *control, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate, double *dDt, int iDir) { /* Compute and apply a 4th order Runge-Kutta update step to a given parameter. 
*/ int iBody, iVar, iEqn, iSubStep, iNumBodies, iNumVars, iNumEqns; double dFoo, dDelta; EVOLVE *evolve = &( control->Evolve); // Save Evolve as a variable for speed and legibility /* Create a copy of BODY array */ BodyCopy(evolve->tmpBody, body, &control->Evolve); /* Derivatives at start */ *dDt = fdGetTimeStep(body, control, system, control->Evolve.tmpUpdate, fnUpdate); /* Adjust dt? */ if (evolve->bVarDt) { /* This is minimum dynamical timescale */ *dDt = AssignDt(*dDt, (control->Io.dNextOutput - evolve->dTime), evolve->dEta); } else { *dDt = evolve->dTimeStep; } evolve->dCurrentDt = *dDt; iNumBodies = evolve->iNumBodies; #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars, iNumEqns, \ iVar, iEqn) for (iBody = 0; iBody < iNumBodies; iBody++) { // int thread_num = omp_get_thread_num(); // int cpu_num = sched_getcpu(); // printf("Thread %3d is running on CPU %3d\n", thread_num, cpu_num); double daDerivVar; iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { daDerivVar = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn = 0; iEqn < iNumEqns; iEqn++) { daDerivVar += iDir * evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; evolve->daDerivProc[0][iBody][iVar][iEqn] = evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } evolve->daDeriv[0][iBody][iVar] = daDerivVar; } } for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10) { // LUGER: Note that this is the VALUE of the variable getting passed, // contrary to what the names suggest These values are updated in the // tmpUpdate struct so that equations which are dependent upon them will // be evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the midpoint of the * timestep */ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5 * (*dDt) * evolve->daDeriv[0][iBody][iVar]; } } } /* First midpoint derivative.*/ PropertiesAuxiliary(evolve->tmpBody, control, system, update); fdGetUpdateInfo(evolve->tmpBody, control, system, evolve->tmpUpdate, fnUpdate); #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars, iNumEqns, \ iVar, iEqn) for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; double daDerivVar; for (iVar = 0; iVar < iNumVars; iVar++) { daDerivVar = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn = 0; iEqn < iNumEqns; iEqn++) { daDerivVar += iDir * evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; evolve->daDerivProc[1][iBody][iVar][iEqn] = evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } evolve->daDeriv[1][iBody][iVar] = daDerivVar; } } for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10) { // LUGER: Note that this is the VALUE of the variable getting passed, // contrary to what the names suggest These values are updated in the // tmpUpdate struct so that equations which are dependent upon them will // be evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[1][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the midpoint of the timestep based on the midpoint derivative. 
*/ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5 * (*dDt) * evolve->daDeriv[1][iBody][iVar]; } } } /* Second midpoint derivative */ PropertiesAuxiliary(evolve->tmpBody, control, system, update); fdGetUpdateInfo(evolve->tmpBody, control, system, evolve->tmpUpdate, fnUpdate); #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars, iNumEqns, \ iVar, iEqn) for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; double daDerivVar; for (iVar = 0; iVar < iNumVars; iVar++) { daDerivVar = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn = 0; iEqn < iNumEqns; iEqn++) { daDerivVar += iDir * evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; evolve->daDerivProc[2][iBody][iVar][iEqn] = evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } evolve->daDeriv[2][iBody][iVar] = daDerivVar; } } for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10) { // LUGER: Note that this is the VALUE of the variable getting passed, // contrary to what the names suggest These values are updated in the // tmpUpdate struct so that equations which are dependent upon them will // be evaluated with higher accuracy *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[2][iBody][iVar]; } else { /* While we're in this loop, move each parameter to the end of the timestep based on the second midpoint derivative. */ *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + *dDt * evolve->daDeriv[2][iBody][iVar]; } } } /* Full step derivative */ PropertiesAuxiliary(evolve->tmpBody, control, system, update); fdGetUpdateInfo(evolve->tmpBody, control, system, evolve->tmpUpdate, fnUpdate); #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars, iNumEqns, \ iVar, iEqn) for (iBody = 0; iBody < iNumBodies; iBody++) { double daDerivVar; iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { daDerivVar = 0; if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10) { // NOTHING! } else { evolve->daDeriv[3][iBody][iVar] = 0; iNumEqns = update[iBody].iNumEqns[iVar]; for (iEqn = 0; iEqn < iNumEqns; iEqn++) { daDerivVar += iDir * evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; evolve->daDerivProc[3][iBody][iVar][iEqn] = evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn]; } evolve->daDeriv[3][iBody][iVar] = daDerivVar; } } } /* Now do the update -- Note the pointer to the home of the actual * variables!!! */ for (iBody = 0; iBody < iNumBodies; iBody++) { iNumVars = update[iBody].iNumVars; for (iVar = 0; iVar < iNumVars; iVar++) { update[iBody].daDeriv[iVar] = 1. / 6 * (evolve->daDeriv[0][iBody][iVar] + 2 * evolve->daDeriv[1][iBody][iVar] + 2 * evolve->daDeriv[2][iBody][iVar] + evolve->daDeriv[3][iBody][iVar]); for (iEqn = 0; iEqn < update[iBody].iNumEqns[iVar]; iEqn++) { update[iBody].daDerivProc[iVar][iEqn] = 1. 
/ 6 * (evolve->daDerivProc[0][iBody][iVar][iEqn] + 2 * evolve->daDerivProc[1][iBody][iVar][iEqn] + 2 * evolve->daDerivProc[2][iBody][iVar][iEqn] + evolve->daDerivProc[3][iBody][iVar][iEqn]); } if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10) { // LUGER: Note that this is the VALUE of the variable getting passed, // contrary to what the names suggest *(update[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar]; } else { *(update[iBody].pdVar[iVar]) += update[iBody].daDeriv[iVar] * (*dDt); } } } } /* * Evolution Subroutine */ void Evolve(BODY *body, CONTROL *control, FILES *files, MODULE *module, OUTPUT *output, SYSTEM *system, UPDATE *update, fnUpdateVariable ***fnUpdate, fnWriteOutput *fnWrite, fnIntegrate fnOneStep) { /* Master evolution routine that controls the simulation integration. */ int iDir, iBody, iModule, nSteps; // Dummy counting variables double dDt, dFoo; // Next timestep, dummy variable double dEqSpinRate; // Store the equilibrium spin rate nSteps = 0; if (control->Evolve.bDoForward) { iDir = 1; } else { iDir = -1; } PropertiesAuxiliary(body, control, system, update); control->Io.dNextOutput = control->Evolve.dTime + control->Io.dOutputTime; // Get derivatives at start, useful for logging dDt = fdGetTimeStep(body, control, system, update, fnUpdate); /* Adjust dt? */ if (control->Evolve.bVarDt) { /* Now choose the correct timestep */ dDt = AssignDt(dDt, (control->Io.dNextOutput - control->Evolve.dTime), control->Evolve.dEta); } else { dDt = control->Evolve.dTimeStep; } /* Write out initial conditions */ WriteOutput(body, control, files, output, system, update, fnWrite, control->Evolve.dTime, dDt); /* If Runge-Kutta need to copy actual update to that in control->Evolve. This transfer all the meta-data about the struct. */ UpdateCopy(control->Evolve.tmpUpdate, update, control->Evolve.iNumBodies); /* * * Main loop begins here * */ while (control->Evolve.dTime < control->Evolve.dStopTime) { /* Take one step */ fnOneStep(body, control, system, update, fnUpdate, &dDt, iDir); for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { for (iModule = 0; iModule < control->Evolve.iNumModules[iBody]; iModule++) { control->fnForceBehavior[iBody][iModule](body, module, &control->Evolve, &control->Io, system, update, fnUpdate, iBody, iModule); } for (iModule = 0; iModule < control->iNumMultiForce[iBody]; iModule++) { control->fnForceBehaviorMulti[iBody][iModule]( body, module, &control->Evolve, &control->Io, system, update, fnUpdate, iBody, iModule); } } fdGetUpdateInfo(body, control, system, update, fnUpdate); /* Halt? */ if (fbCheckHalt(body, control, update, fnUpdate)) { fdGetUpdateInfo(body, control, system, update, fnUpdate); WriteOutput(body, control, files, output, system, update, fnWrite, control->Evolve.dTime, control->Io.dOutputTime / control->Evolve.nSteps); return; } for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) { body[iBody].dAge += iDir * dDt; } control->Evolve.dTime += dDt; nSteps++; /* Time for Output? */ if (control->Evolve.dTime >= control->Io.dNextOutput) { control->Evolve.nSteps += nSteps; WriteOutput(body, control, files, output, system, update, fnWrite, control->Evolve.dTime, control->Io.dOutputTime / control->Evolve.nSteps); // Timesteps are synchronized with the output time, so this statement is // sufficient control->Io.dNextOutput += control->Io.dOutputTime; nSteps = 0; } /* Get auxiliary properties for next step -- first call was prior to loop. 
*/ PropertiesAuxiliary(body, control, system, update); // If control->Evolve.bFirstStep hasn't been switched off by now, do so. if (control->Evolve.bFirstStep) { control->Evolve.bFirstStep = 0; } // Have any variables reached an interesting value? CheckProgress(body, control, system, update); } if (control->Io.iVerbose >= VERBPROG) { printf("Evolution completed.\n"); } }
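The two sketches below are standalone illustrations, not part of VPLANET; all names in them are hypothetical. This first one condenses the variable-timestep rule implemented by fdGetTimeStep and AssignDt above: each evolving variable proposes the timescale |x/(dx/dt)|, the global minimum is scaled by eta, and the step is capped so it never overshoots the next scheduled output.

/* timestep_sketch.c -- illustrative only; hypothetical names, not VPLANET's API */
#include <math.h>
#include <stdio.h>

#define DHUGE 1e30

/* Mirror of AssignDt above: scale the raw minimum timescale by eta and
 * never step past the next scheduled output. */
static double AssignDtSketch(double dMin, double dNextOutput, double dEta) {
  dMin = dEta * dMin;
  if (dNextOutput < dMin) {
    dMin = dNextOutput;
  }
  return dMin;
}

int main(void) {
  /* Two variables with values x and derivatives dxdt: the candidate
   * timescale for each is |x / (dx/dt)|, and the smallest one wins. */
  double x[2] = {1.0, 2.0}, dxdt[2] = {0.1, -4.0};
  double dMin = DHUGE;
  for (int i = 0; i < 2; i++) {
    if (dxdt[i] != 0.0) {
      double dNow = fabs(x[i] / dxdt[i]);
      if (dNow < dMin) dMin = dNow;
    }
  }
  /* eta = 0.01, 3.0 time units until the next output */
  printf("dt = %g\n", AssignDtSketch(dMin, 3.0, 0.01)); /* 0.01 * 0.5 = 0.005 */
  return 0;
}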
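The second sketch shows, on a single scalar ODE, the classical fourth-order Runge-Kutta update that RungeKutta4Step applies to every primary variable: a derivative at the start, two midpoint derivatives, a full-step derivative, combined with weights 1/6, 2/6, 2/6, 1/6. The ODE and names here are illustrative only.

/* rk4_sketch.c -- illustrative only; not part of VPLANET (compile with -lm) */
#include <math.h>
#include <stdio.h>

/* dy/dt = -0.5*y, with analytic solution y0*exp(-0.5*t) for comparison */
static double Deriv(double y) { return -0.5 * y; }

int main(void) {
  double y = 1.0, dt = 0.01, t = 0.0;
  while (t < 10.0) {
    /* Same staging as RungeKutta4Step: start, two midpoints, full step */
    double k1 = Deriv(y);
    double k2 = Deriv(y + 0.5 * dt * k1);
    double k3 = Deriv(y + 0.5 * dt * k2);
    double k4 = Deriv(y + dt * k3);
    y += dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4); /* 1/6 (k1+2k2+2k3+k4) */
    t += dt;
  }
  printf("y(10) = %.10f, exact = %.10f\n", y, exp(-0.5 * 10.0));
  return 0;
}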
GB_unop__one_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_uint32_uint32 // op(A') function: GB_unop_tran__one_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_uint32_uint32 ( uint32_t *Cx, // Cx and Ax may be aliased const uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
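A standalone sketch (hypothetical names, outside GraphBLAS) of the kernel pattern that GB_unop_apply__one_uint32_uint32 reduces to once the no-op GB_GETA/GB_CAST macros are expanded: a flat, statically scheduled OpenMP loop writing op(aij), here the constant ONE, into Cx.

/* unop_apply_sketch.c -- illustrative only; not part of SuiteSparse:GraphBLAS */
#include <stdint.h>
#include <stdio.h>

/* Elementwise Cx[p] = op(Ax[p]); for the ONE operator the input is ignored. */
static void unop_apply_one_uint32(uint32_t *Cx, const uint32_t *Ax,
                                  int64_t anz, int nthreads) {
  int64_t p;
  (void)Ax; /* ONE discards its argument, like the generated kernel above */
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    Cx[p] = 1;
  }
}

int main(void) {
  uint32_t a[4] = {7, 8, 9, 10}, c[4];
  unop_apply_one_uint32(c, a, 4, 2);
  printf("%u %u %u %u\n", c[0], c[1], c[2], c[3]); /* 1 1 1 1 */
  return 0;
}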
nmf_pgd.c
/* Generated by Cython 0.29.14 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "language": "c", "name": "gensim.models.nmf_pgd", "sources": [ "gensim/models/nmf_pgd.pyx" ] }, "module_name": "gensim.models.nmf_pgd" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_14" #define CYTHON_HEX_VERSION 0x001D0EF0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef 
CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef 
CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) 
#else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__gensim__models__nmf_pgd #define __PYX_HAVE_API__gensim__models__nmf_pgd /* Early includes */ #include <math.h> #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
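/* __pyx_PyFloat_AsDouble above illustrates the CYTHON_ASSUME_SAFE_MACROS
 * pattern used throughout: for an exact float the unchecked slot read
 * PyFloat_AS_DOUBLE is taken, otherwise the checked PyFloat_AsDouble call.
 * As with the plain C API, error checking stays with the caller:
 *
 *     double x = __pyx_PyFloat_AsDouble(obj);
 *     if (x == -1.0 && PyErr_Occurred()) { ... }  // conversion failed
 */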
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
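/* likely()/unlikely() compile to __builtin_expect on GCC > 2.95 and to
 * plain pass-throughs elsewhere; they only hint the expected branch
 * direction to the compiler and never change semantics. Error paths in
 * this file are consistently marked cold, e.g.:
 *
 *     if (unlikely(!sys)) goto bad;
 */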
__FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "gensim/models/nmf_pgd.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":105 * * 
@cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, 
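/* The RefNanny macros above are a debugging aid: when Cython is built with
 * CYTHON_REFNANNY, every __Pyx_INCREF/__Pyx_DECREF is routed through an API
 * table (imported from Cython's refnanny module) that audits reference
 * counting per function context. In normal builds the #else branch makes
 * them collapse to plain Py_INCREF/Py_DECREF or to nothing, so they cost
 * nothing at runtime.
 */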
PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || 
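/* With CYTHON_FAST_THREAD_STATE, the error-handling macros above fetch and
 * restore exception state directly through a cached PyThreadState pointer
 * (__pyx_tstate) instead of calling PyErr_Fetch/PyErr_Restore, saving one
 * thread-state lookup per operation; the #else branch falls back to the
 * public C API with the same observable behaviour.
 */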
PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
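/* __Pyx_BUILD_ASSERT_EXPR above is a classic compile-time assertion:
 * sizeof(char [1 - 2*!(cond)]) forms an array of size 1 when cond holds and
 * of (invalid) size -1 otherwise, turning a false condition into a compile
 * error. It is used here to verify the PyFrameObject layout before
 * __Pyx_PyFrame_GetLocalsplus pokes at f_localsplus directly for the
 * fast-call path.
 */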
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
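/* The __Pyx_GetItemInt* macros above implement indexing fast paths: a
 * compile-time check that the index fits in Py_ssize_t, then a direct
 * PyList/PyTuple item read (with optional wraparound and bounds checking),
 * and only as a last resort the generic PyObject_GetItem protocol. This is
 * what an expression like `obj[i]` in the .pyx source lowers to when the
 * object is not already a typed memoryview.
 */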
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* 
CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* 
CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'gensim.models.nmf_pgd' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double, double); /*proto*/ static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double, double); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct 
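/* The __Pyx_PyObject_to_MemoryviewSlice_* converters declared above are
 * what turn the Python-level arguments of solve_h() into C slices; the
 * suffix appears to encode per-axis layout plus dtype, matching the .pyx
 * signature quoted near the end of this file:
 *
 *     d_dc_double  <- double[:, ::1]   (2-D, C-contiguous innermost axis)
 *     dsds_double  <- double[:, :]     (2-D, fully strided)
 *     dc_int       <- int[::1]         (1-D contiguous)
 */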
__pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
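/* __Pyx_TypeInfo_double / __Pyx_TypeInfo_int above describe the element
 * types expected from buffer exporters; __Pyx_BufFmt_CheckString compares
 * them against the exporter's struct-module format string (e.g. "d" or "i")
 * when a memoryview is acquired. The 'R' typegroup presumably marks a real
 * (floating-point) type, while the IS_UNSIGNED test selects 'U' or 'I' for
 * integers (always 'I' for a plain signed int).
 */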
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "gensim.models.nmf_pgd" extern int __pyx_module_is_main_gensim__models__nmf_pgd; int __pyx_module_is_main_gensim__models__nmf_pgd = 0; /* Implementation of 'gensim.models.nmf_pgd' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_h[] = "h"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_WtW[] = "WtW"; static const char __pyx_k_Wtv[] = "Wtv"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_grad[] = "grad"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_kappa[] = "kappa"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_hessian[] = "hessian"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_solve_h[] = "solve_h"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_n_samples[] = "n_samples"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_violation[] = "violation"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_sample_idx[] = "sample_idx"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_permutation[] = "permutation"; static const char __pyx_k_n_components[] = "n_components"; static const char __pyx_k_pyx_checksum[] = 
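/* Every Python string and identifier used by the module is materialised in
 * two steps: the __pyx_k_* arrays above are plain C literals, and at module
 * init __Pyx_InitStrings walks a __Pyx_StringTabEntry table to create the
 * corresponding (interned, where appropriate) PyObject constants declared
 * below as __pyx_n_s_* / __pyx_kp_s_*, so attribute and dict lookups reuse
 * a single object instead of rebuilding strings at runtime.
 */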
"__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_projected_grad[] = "projected_grad"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_component_idx_1[] = "component_idx_1"; static const char __pyx_k_component_idx_2[] = "component_idx_2"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_gensim_models_nmf_pgd[] = "gensim.models.nmf_pgd"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_gensim_models_nmf_pgd_pyx[] = "gensim/models/nmf_pgd.pyx"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject 
*__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_WtW; static PyObject *__pyx_n_s_Wtv; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_component_idx_1; static PyObject *__pyx_n_s_component_idx_2; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_gensim_models_nmf_pgd; static PyObject *__pyx_kp_s_gensim_models_nmf_pgd_pyx; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_grad; static PyObject *__pyx_n_s_h; static PyObject *__pyx_n_s_hessian; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_kappa; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_n_components; static PyObject *__pyx_n_s_n_samples; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_permutation; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_projected_grad; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_sample_idx; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static 
PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_solve_h; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violation; static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, 
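/* Naming convention visible above: Cython mangles module-level functions
 * with length-prefixed path components, e.g.
 * __pyx_pf_6gensim_6models_7nmf_pgd_solve_h for solve_h in
 * gensim.models.nmf_pgd ("6gensim" = the 6-character component "gensim",
 * and so on). The _pf_ "impl" function receives already-converted C
 * arguments (memoryview slices, a C double for kappa); parsing the Python
 * argument tuple is handled by a separate generated wrapper before the
 * impl is called.
 */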
PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject 
*__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_codeobj__20;
static PyObject *__pyx_codeobj__27;
/* Late includes */

/* "gensim/models/nmf_pgd.pyx":12
 * from cython.parallel import prange
 *
 * cdef double fmin(double x, double y) nogil:             # <<<<<<<<<<<<<<
 *     return x if x < y else y
 *
 */

static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double __pyx_v_x, double __pyx_v_y) {
  double __pyx_r;
  double __pyx_t_1;

  /* "gensim/models/nmf_pgd.pyx":13
 *
 * cdef double fmin(double x, double y) nogil:
 *     return x if x < y else y             # <<<<<<<<<<<<<<
 *
 * cdef double fmax(double x, double y) nogil:
 */
  if (((__pyx_v_x < __pyx_v_y) != 0)) {
    __pyx_t_1 = __pyx_v_x;
  } else {
    __pyx_t_1 = __pyx_v_y;
  }
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "gensim/models/nmf_pgd.pyx":12
 * from cython.parallel import prange
 *
 * cdef double fmin(double x, double y) nogil:             # <<<<<<<<<<<<<<
 *     return x if x < y else y
 *
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "gensim/models/nmf_pgd.pyx":15
 *     return x if x < y else y
 *
 * cdef double fmax(double x, double y) nogil:             # <<<<<<<<<<<<<<
 *     return x if x > y else y
 *
 */

static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double __pyx_v_x, double __pyx_v_y) {
  double __pyx_r;
  double __pyx_t_1;

  /* "gensim/models/nmf_pgd.pyx":16
 *
 * cdef double fmax(double x, double y) nogil:
 *     return x if x > y else y             # <<<<<<<<<<<<<<
 *
 * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa):
 */
  if (((__pyx_v_x > __pyx_v_y) != 0)) {
    __pyx_t_1 = __pyx_v_x;
  } else {
    __pyx_t_1 = __pyx_v_y;
  }
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "gensim/models/nmf_pgd.pyx":15
 *     return x if x < y else y
 *
 * cdef double fmax(double x, double y) nogil:             # <<<<<<<<<<<<<<
 *     return x if x > y else y
 *
 */

  /* function exit code */
  __pyx_L0:;
  return __pyx_r;
}

/* "gensim/models/nmf_pgd.pyx":18
 *     return x if x > y else y
 *
 * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<<
 *
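 * Editor's note (annotation, not Cython output): solve_h runs one sweep of
 * cyclic coordinate projected gradient descent on each column of h, with the
 * loop over samples parallelised via prange. Components are visited in the
 * order given by `permutation`; for each coordinate (i, j) the generated code
 * below computes grad = (WtW h - Wtv)[i, j], rescales it by kappa / WtW[i, i],
 * and applies the non-negative update h[i, j] = fmax(h[i, j] - grad, 0.).
 * See the annotated update step further down.
 *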
"""Find optimal dense vector representation for current W and r matrices. * */ /* Python wrapper */ static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6gensim_6models_7nmf_pgd_solve_h[] = "solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa)\nFind optimal dense vector representation for current W and r matrices.\n\n Parameters\n ----------\n h : matrix\n Dense representation of documents in current batch.\n Wtv : matrix\n WtW : matrix\n\n Returns\n -------\n float\n Cumulative difference between previous and current h vectors.\n\n "; static PyMethodDef __pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h = {"solve_h", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6gensim_6models_7nmf_pgd_solve_h}; static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_h = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Wtv = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_WtW = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_permutation = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_kappa; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("solve_h (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_h,&__pyx_n_s_Wtv,&__pyx_n_s_WtW,&__pyx_n_s_permutation,&__pyx_n_s_kappa,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_h)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Wtv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 1); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_WtW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 2); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permutation)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 3); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 4); __PYX_ERR(0, 18, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "solve_h") < 0)) __PYX_ERR(0, 18, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_h = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_h.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_Wtv = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Wtv.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_WtW = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_WtW.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_permutation = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_permutation.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_kappa = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_kappa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 18, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6gensim_6models_7nmf_pgd_solve_h(__pyx_self, __pyx_v_h, __pyx_v_Wtv, __pyx_v_WtW, __pyx_v_permutation, __pyx_v_kappa); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa) { Py_ssize_t __pyx_v_n_components; CYTHON_UNUSED Py_ssize_t __pyx_v_n_samples; double __pyx_v_violation; double __pyx_v_grad; double __pyx_v_projected_grad; double __pyx_v_hessian; Py_ssize_t __pyx_v_sample_idx; Py_ssize_t __pyx_v_component_idx_1; Py_ssize_t __pyx_v_component_idx_2; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; double __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; PyObject *__pyx_t_26 = NULL; __Pyx_RefNannySetupContext("solve_h", 0); /* "gensim/models/nmf_pgd.pyx":35 * """ * * cdef Py_ssize_t n_components = h.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 */ __pyx_v_n_components = (__pyx_v_h.shape[0]); /* "gensim/models/nmf_pgd.pyx":36 * * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] # <<<<<<<<<<<<<< * cdef double violation = 0 * cdef double grad, projected_grad, hessian */ __pyx_v_n_samples = (__pyx_v_h.shape[1]); /* "gensim/models/nmf_pgd.pyx":37 * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 # <<<<<<<<<<<<<< * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 */ __pyx_v_violation = 0.0; /* "gensim/models/nmf_pgd.pyx":39 * cdef double 
violation = 0 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 */ __pyx_v_sample_idx = 0; /* "gensim/models/nmf_pgd.pyx":40 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_2 = 0 * */ __pyx_v_component_idx_1 = 0; /* "gensim/models/nmf_pgd.pyx":41 * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 # <<<<<<<<<<<<<< * * for sample_idx in prange(n_samples, nogil=True): */ __pyx_v_component_idx_2 = 0; /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_n_samples; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_violation) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_component_idx_1) lastprivate(__pyx_v_component_idx_2) lastprivate(__pyx_v_grad) lastprivate(__pyx_v_hessian) lastprivate(__pyx_v_projected_grad) firstprivate(__pyx_v_sample_idx) lastprivate(__pyx_v_sample_idx) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_sample_idx = (Py_ssize_t)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_component_idx_1 = ((Py_ssize_t)0xbad0bad0); __pyx_v_component_idx_2 = ((Py_ssize_t)0xbad0bad0); __pyx_v_grad = ((double)__PYX_NAN()); __pyx_v_hessian = ((double)__PYX_NAN()); __pyx_v_projected_grad = ((double)__PYX_NAN()); /* "gensim/models/nmf_pgd.pyx":44 * * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): # <<<<<<<<<<<<<< * component_idx_1 = permutation[component_idx_1] * */ __pyx_t_4 = __pyx_v_n_components; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_component_idx_1 = __pyx_t_6; /* "gensim/models/nmf_pgd.pyx":45 * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] # <<<<<<<<<<<<<< * * grad = -Wtv[component_idx_1, sample_idx] */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_v_component_idx_1 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_permutation.data) + __pyx_t_7)) ))); /* "gensim/models/nmf_pgd.pyx":47 * component_idx_1 = permutation[component_idx_1] * * grad = -Wtv[component_idx_1, sample_idx] # <<<<<<<<<<<<<< * * for component_idx_2 in range(n_components): */ __pyx_t_8 = __pyx_v_component_idx_1; __pyx_t_9 = __pyx_v_sample_idx; __pyx_v_grad = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Wtv.data + __pyx_t_8 * __pyx_v_Wtv.strides[0]) ) + 
__pyx_t_9 * __pyx_v_Wtv.strides[1]) )))); /* "gensim/models/nmf_pgd.pyx":49 * grad = -Wtv[component_idx_1, sample_idx] * * for component_idx_2 in range(n_components): # <<<<<<<<<<<<<< * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * */ __pyx_t_10 = __pyx_v_n_components; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_component_idx_2 = __pyx_t_12; /* "gensim/models/nmf_pgd.pyx":50 * * for component_idx_2 in range(n_components): * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] # <<<<<<<<<<<<<< * * hessian = WtW[component_idx_1, component_idx_1] */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_14 = __pyx_v_component_idx_2; __pyx_t_15 = __pyx_v_component_idx_2; __pyx_t_16 = __pyx_v_sample_idx; __pyx_v_grad = (__pyx_v_grad + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_13 * __pyx_v_WtW.strides[0]) )) + __pyx_t_14)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_15 * __pyx_v_h.strides[0]) )) + __pyx_t_16)) ))))); } /* "gensim/models/nmf_pgd.pyx":52 * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * * hessian = WtW[component_idx_1, component_idx_1] # <<<<<<<<<<<<<< * * grad = grad * kappa / hessian */ __pyx_t_17 = __pyx_v_component_idx_1; __pyx_t_18 = __pyx_v_component_idx_1; __pyx_v_hessian = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_17 * __pyx_v_WtW.strides[0]) )) + __pyx_t_18)) ))); /* "gensim/models/nmf_pgd.pyx":54 * hessian = WtW[component_idx_1, component_idx_1] * * grad = grad * kappa / hessian # <<<<<<<<<<<<<< * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad */ __pyx_v_grad = ((__pyx_v_grad * __pyx_v_kappa) / __pyx_v_hessian); /* "gensim/models/nmf_pgd.pyx":56 * grad = grad * kappa / hessian * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad # <<<<<<<<<<<<<< * * violation += projected_grad * projected_grad */ __pyx_t_20 = __pyx_v_component_idx_1; __pyx_t_21 = __pyx_v_sample_idx; if ((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_20 * __pyx_v_h.strides[0]) )) + __pyx_t_21)) ))) == 0.0) != 0)) { __pyx_t_19 = __pyx_f_6gensim_6models_7nmf_pgd_fmin(0.0, __pyx_v_grad); } else { __pyx_t_19 = __pyx_v_grad; } __pyx_v_projected_grad = __pyx_t_19; /* "gensim/models/nmf_pgd.pyx":58 * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad * * violation += projected_grad * projected_grad # <<<<<<<<<<<<<< * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) */ __pyx_v_violation = (__pyx_v_violation + (__pyx_v_projected_grad * __pyx_v_projected_grad)); /* "gensim/models/nmf_pgd.pyx":60 * violation += projected_grad * projected_grad * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) 
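 *
 * Editor's note (annotation, not Cython output): the quoted line above is the
 * projected gradient step. At this point grad holds
 * kappa (WtW h - Wtv)[i, j] / WtW[i, i], so the update takes a scaled
 * gradient step and clips at zero to keep h non-negative. For the stopping
 * measure, projected_grad equals fmin(0, grad) when h[i, j] == 0 -- a
 * positive gradient there cannot move h, since the step is clipped back to
 * the constraint, so it is not counted -- and violation accumulates its
 * square.
 *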
# <<<<<<<<<<<<<< * * return sqrt(violation) */ __pyx_t_22 = __pyx_v_component_idx_1; __pyx_t_23 = __pyx_v_sample_idx; __pyx_t_24 = __pyx_v_component_idx_1; __pyx_t_25 = __pyx_v_sample_idx; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_24 * __pyx_v_h.strides[0]) )) + __pyx_t_25)) )) = __pyx_f_6gensim_6models_7nmf_pgd_fmax(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_22 * __pyx_v_h.strides[0]) )) + __pyx_t_23)) ))) - __pyx_v_grad), 0.); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "gensim/models/nmf_pgd.pyx":62 * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) * * return sqrt(violation) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_26 = PyFloat_FromDouble(sqrt(__pyx_v_violation)); if (unlikely(!__pyx_t_26)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_26); __pyx_r = __pyx_t_26; __pyx_t_26 = 0; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
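 *
 * Editor's note (annotation, not Cython output): the value returned above is
 * sqrt(violation), the Euclidean norm of the projected gradient accumulated
 * over the whole sweep. The docstring's "cumulative difference between
 * previous and current h vectors" describes this only loosely: it is a
 * stationarity measure for the solver, not a literal difference of two h
 * matrices.
 *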
* */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_26); __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_h, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Wtv, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_WtW, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_permutation, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) 
&& PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = 
(__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL 
* info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if 
self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); 
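/* Editor's note (annotation, not Cython output): the standard generated
   epilogue follows -- on error, record the traceback and fall through with
   __pyx_r = NULL; on success, jump to __pyx_L0, hand the owned reference to
   the caller (__Pyx_XGIVEREF) and return it. Every memoryview accessor in
   this section ends with the same pattern. */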
__pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = 
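/* __len__ of the array simply reports the extent of the leading
   dimension: */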
(__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, 
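/* Note: array.__getattr__, __getitem__ and __setitem__ all delegate to
   the wrapping memoryview fetched through the memview property. */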
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
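/* Note: array instances are deliberately unpicklable; __cinit__ takes
   required arguments, so there is no default __reduce__ and both
   __reduce_cython__ and __setstate_cython__ raise TypeError. */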
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
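/* buf == NULL means "allocate a fresh buffer"; a non-NULL buf is
   wrapped without copying (see allocate_buffer=False below) */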
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) 
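/* allocate_buffer=False stops array.__cinit__ from reserving memory of
   its own; the caller-owned buffer is attached via result.data = buf
   just below. */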
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = 
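/* Enum is a small named-sentinel helper: instances such as the
   "<strided and direct or indirect>" marker only carry a name that
   __repr__ echoes back. */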
__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, 
'__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, 
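/* The constant 0xb068931 (__pyx_int_184977713) is used as a checksum
   of the Enum field layout; __pyx_unpickle_Enum compares it on
   unpickling so that pickles from an incompatible build are rejected. */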
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, 
(type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int 
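/* Worked example for align_pointer above, with hypothetical values:
   memory = 0x1003 and alignment = 8 give offset = 3, so the function
   returns 0x1003 + (8 - 3) = 0x1008, the next 8-byte boundary; an
   already-aligned pointer is returned unchanged (offset == 0). */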
__pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = 
flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) 
* self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in 
range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * 
else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 
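/* Note on memoryview.__dealloc__ above: a lock that came from the
   preallocated table is recycled by swapping it into the last in-use
   slot, keeping live entries contiguous; a lock that was obtained from
   PyThread_allocate_lock() is released with PyThread_free_lock(). */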
= (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) 
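/* _unellipsify (defined later in this module) expands any Ellipsis in
   the index into explicit full slices and reports whether slices are
   present: slice access builds a new memoryview, while scalar access
   reads a single item through get_item_pointer. */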
__PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int 
__pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = 
__Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
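/* Summary of __setitem__ (View.MemoryView:416-429, quoted above): writes to a
 * read-only view raise TypeError; otherwise the index is normalized with
 * _unellipsify and the assignment is dispatched. Slice indices go through
 * is_slice(): a memoryview source uses setitem_slice_assignment(), anything
 * else is broadcast with setitem_slice_assign_scalar(). Plain element indices
 * fall through to setitem_indexed(). */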
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); 
__pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || 
likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; 
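/* Summary of setitem_slice_assign_scalar (View.MemoryView:449-479): the
 * scalar item is packed into a 128-int stack array when it fits; if
 * self.view.itemsize exceeds sizeof(array), a heap buffer is obtained with
 * PyMem_Malloc (raising MemoryError on failure). The packed item is then
 * broadcast over the destination slice via slice_assign_scalar, after an
 * assert_direct_dimensions check when suboffsets are present. The finally
 * clause frees the heap buffer on both the normal and the exceptional path. */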
PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp 
+ 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); 
__Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * 
for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } 
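/* Summary of assign_item_from_object (View.MemoryView:501-515): the mirror
 * of convert_item_to_object. The value is serialized with
 * struct.pack(self.view.format, ...), splatting tuple values as separate
 * arguments, and the resulting bytes are copied into itemp one char at a
 * time, as in the enumerate(bytesvalue) loop above. */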
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # 
<<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* 
"View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; 
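/* Summary of the shape property (View.MemoryView:563-564): materializes
 * view.shape[:view.ndim] as a Python tuple. The strides property that
 * follows raises ValueError when view.strides is NULL, i.e. when the
 * underlying buffer does not expose stride information. */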
Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer 
view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * 
self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ 
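/* Added explanatory note (not Cython output): nbytes above is computed as
 * self.size * self.view.itemsize through Python-object arithmetic, while
 * size below lazily caches the element count (the product of the shape) in
 * self._size so later accesses skip the multiplication loop. Hedged usage
 * sketch, `buf` being a hypothetical buffer of 8-byte elements:
 *
 *     cdef double[:, :] mv = buf
 *     mv.size                # product of mv.shape, cached after first use
 *     mv.nbytes              # mv.size * mv.itemsize
 */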
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r 
= __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 
612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # 
<<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # 
<<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
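/* Added explanatory note (not Cython output): copy() and copy_fortran()
 * share one path: slice_copy() snapshots the current slice, then
 * slice_copy_contig() (__pyx_memoryview_copy_new_contig) allocates a fresh
 * buffer in "c" order with PyBUF_C_CONTIGUOUS or "fortran" order with
 * PyBUF_F_CONTIGUOUS, and memoryview_copy_from_slice() wraps the result.
 * Hedged usage sketch:
 *
 *     mv2 = mv.copy()          # new C-contiguous copy of the data
 *     mv3 = mv.copy_fortran()  # new Fortran-contiguous copy
 */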
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
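/* Added explanatory note (not Cython output): memoryview_cwrapper here is
 * the C-level constructor exported as __pyx_memoryview_new(); it calls the
 * memoryview type with (obj, flags, dtype_is_object) and attaches the
 * static __Pyx_TypeInfo pointer, and memoryview_check() simply compiles
 * isinstance(o, memoryview) down to __Pyx_TypeCheck. The _unellipsify()
 * helper further below normalizes an index tuple: an Ellipsis expands into
 * enough slice(None) entries to cover ndim dimensions, and missing trailing
 * dimensions are padded the same way, e.g. with ndim == 3,
 *
 *     mv[..., 0]   # is treated as mv[slice(None), slice(None), 0]
 *
 * Hedged C-side sketch (typeinfo being a hypothetical __Pyx_TypeInfo *):
 *
 *     PyObject *mv = __pyx_memoryview_new(obj, PyBUF_FULL_RO, 0, typeinfo);
 */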
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in 
suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * 
if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= 
PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ 
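/* [Editor's note] Illustrative sketch only -- not part of Cython's generated
 * output, and excluded from compilation by the `#if 0` guard. The
 * `elif index is None` branch above implements numpy.newaxis semantics: it
 * appends a broadcast axis of extent 1 with stride 0 and suboffset -1
 * (direct). With a zero stride every position along the new axis resolves to
 * the same element, as this hypothetical standalone demo shows. */
#if 0
#include <stdio.h>
#include <stddef.h>

int main(void) {
    double data[5] = {0.0, 1.0, 2.0, 3.0, 4.0};
    /* Layout produced for data[None, :]: axis 0 is the inserted axis. */
    ptrdiff_t strides[2] = {0, (ptrdiff_t)sizeof(double)};
    char *base = (char *)data;
    ptrdiff_t i, j;
    for (i = 0; i < 1; i++)
        for (j = 0; j < 5; j++)
            /* Element (i, j) sits at base + i*strides[0] + j*strides[1];
             * i contributes nothing because strides[0] == 0. */
            printf("%.1f ", *(double *)(base + i * strides[0] + j * strides[1]));
    printf("\n");
    return 0;
}
#endif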
goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { 
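/* [Editor's note] Plain-memoryview path: unlike the _memoryviewslice branch
 * above, there are no custom object<->dtype converter functions to carry
 * over, so memoryview_fromslice receives NULL for both function pointers and
 * item access falls back to the format-string based conversion implemented on
 * the memoryview base class. */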
__Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape 
* if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } 
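/* [Editor's note] Illustrative sketch only -- not part of Cython's generated
 * output, and excluded from compilation by the `#if 0` guard. The start
 * normalisation just performed follows the usual Python slicing rule:
 * negative indices are wrapped once by the extent, and anything still out of
 * range is clamped, with the usable end position depending on the step's
 * sign. `clamp_start` below is a hypothetical helper distilling the
 * `have_start` case. */
#if 0
#include <assert.h>

static long clamp_start(long start, long shape, int negative_step) {
    if (start < 0) {
        start += shape;              /* wrap a negative index once        */
        if (start < 0)
            start = 0;               /* still negative: clamp to front    */
    } else if (start >= shape) {
        /* Past the end: the highest usable position depends on direction. */
        start = negative_step ? (shape - 1) : shape;
    }
    return start;
}

int main(void) {
    assert(clamp_start(-2, 5, 0) == 3);   /* s[-2:]                       */
    assert(clamp_start(-99, 5, 0) == 0);  /* s[-99:]                      */
    assert(clamp_start(7, 5, 0) == 5);    /* s[7:]    -> empty slice      */
    assert(clamp_start(7, 5, 1) == 4);    /* s[7::-1] -> last valid index */
    return 0;
}
#endif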
__pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * 
stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int 
__pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* 
"View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef 
assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject 
*__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # 
<<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # 
<<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # 
<<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char 
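/* [annotation, added] get_slice_from_memview() above has two paths: when
 * the object is already a _memoryviewslice it returns a pointer straight
 * into the __Pyx_memviewslice embedded in that object (no copying);
 * otherwise slice_copy() materialises an equivalent slice header into the
 * caller-supplied `mslice`, reading shape, strides and suboffsets out of
 * the Py_buffer held by the plain memoryview. slice_copy() is pure header
 * bookkeeping: it copies ndim Py_ssize_t triples and records -1 (meaning
 * "no indirection") for every dimension whenever the buffer exports no
 * suboffsets array at all.
 */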
*)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given 
memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
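 *
 * [annotation, added; not part of the quoted Cython source] Typed slices
 * remember how to box and unbox their element type: when the source is
 * already a _memoryviewslice, its to_object_func / to_dtype_func pointers
 * are carried over to the copy, while a plain memoryview passes NULL for
 * both and element access falls back to the struct-format machinery. The
 * call below is only a sketch of the dispatch, reusing names from the
 * quoted source:
 *
 *     copy = memoryview_fromslice(memviewslice[0], memview.view.ndim,
 *                                 to_object_func,   # NULL when untyped
 *                                 to_dtype_func,    # NULL when untyped
 *                                 memview.dtype_is_object)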
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if 
mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* 
"View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = 
__pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # 
<<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice 
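/* [annotation, added] fill_contig_strides_array() above synthesises the
 * strides a freshly allocated contiguous buffer would have: for order 'F'
 * it walks the dimensions left to right, for 'C' right to left, in both
 * cases multiplying the running stride by each extent. The returned value
 * is the final product, i.e. the total buffer size in bytes, the same
 * quantity slice_get_size() computes. Worked example (illustrative values,
 * not from the source) with shape = {3, 4} and itemsize = 8:
 *
 *     order 'C'  ->  strides = {32, 8},   return value = 96
 *     order 'F'  ->  strides = {8, 24},   return value = 96
 */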
*__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] 
= 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
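/* [annotation, added] _err_extents() here, and _err_dim() and _err()
 * below, are the error funnels for the nogil copying routines. Each is
 * declared `except -1 with gil` in the Cython source, which is why the
 * generated bodies wrap themselves in PyGILState_Ensure /
 * PyGILState_Release: the copy loops run without the GIL and must
 * re-acquire it before any exception object can be created and raised.
 * The tuple assembled around this comment carries (i, extent1, extent2)
 * into the "got differing extents" ValueError message.
 */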
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
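/* [annotation, added] The conditional call below is Cython's standard
 * bound-method unpacking optimisation: when `error` is a bound method,
 * PyMethod_GET_SELF / PyMethod_GET_FUNCTION above peel it apart so the
 * call can pass `self` plus the formatted message as two plain arguments
 * (__Pyx_PyObject_Call2Args) instead of allocating an argument tuple;
 * otherwise the one-argument path is taken. Both branches are simply the
 * Cython line `raise error(msg.decode('ascii') % dim)`.
 */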
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
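/* [annotation, added] _err() treats msg == NULL as a deliberate
 * convention: with a message it raises error(msg.decode('ascii')), and
 * without one it re-raises the exception class itself (the bare
 * `raise error` branch below). copy_data_to_temp() relies on this via
 * `_err(MemoryError, NULL)` when malloc fails, yielding a plain
 * MemoryError with no argument.
 */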
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
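/* copy_data_to_temp returned NULL: the overlapping src slice could not be
 * copied into a fresh contiguous scratch buffer, so the error is propagated. */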
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
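/* broadcast_leading (defined just below): shifts the existing ndim axes to
 * the right inside the fixed-size shape/strides/suboffsets arrays and fills
 * the leading offset = ndim_other - ndim axes with extent 1, stride
 * strides[0] and suboffset -1, so that a lower-dimensional slice lines up
 * with a higher-dimensional destination before copying (e.g. a (5,) slice
 * is promoted to (1, 5) against a two-dimensional dst). */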
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
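/* _slice_assign_scalar (its locals are declared around this comment)
 * recurses one axis at a time: at ndim == 1 it memcpy's `item` into each
 * element, advancing by the axis stride; otherwise it peels off the leading
 * axis and recurses with shape + 1 and strides + 1. */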
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 
0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 
= (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree 
fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", 
(PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if 
(PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { 
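/* __dealloc__ may run arbitrary Python code, so any exception already in
 * flight is stashed with PyErr_Fetch and restored afterwards; the refcount
 * is bumped around the call to keep `o` alive while it executes. */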
PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, 
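/* __reduce_cython__ and __setstate_cython__ are the pickle hooks Cython
 * emits for every cdef class; for memoryview they are expected to raise,
 * since a view over foreign memory cannot be pickled. */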
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice 
__pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, 
/*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_nmf_pgd(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_nmf_pgd}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "nmf_pgd", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, 
__pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_WtW, __pyx_k_WtW, sizeof(__pyx_k_WtW), 0, 0, 1, 1}, {&__pyx_n_s_Wtv, __pyx_k_Wtv, sizeof(__pyx_k_Wtv), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_1, __pyx_k_component_idx_1, sizeof(__pyx_k_component_idx_1), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_2, __pyx_k_component_idx_2, sizeof(__pyx_k_component_idx_2), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_gensim_models_nmf_pgd, __pyx_k_gensim_models_nmf_pgd, sizeof(__pyx_k_gensim_models_nmf_pgd), 0, 0, 1, 1}, {&__pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_k_gensim_models_nmf_pgd_pyx, sizeof(__pyx_k_gensim_models_nmf_pgd_pyx), 0, 0, 1, 0}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_grad, __pyx_k_grad, sizeof(__pyx_k_grad), 0, 0, 1, 1}, {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, {&__pyx_n_s_hessian, __pyx_k_hessian, sizeof(__pyx_k_hessian), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 
0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_kappa, __pyx_k_kappa, sizeof(__pyx_k_kappa), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_n_components, __pyx_k_n_components, sizeof(__pyx_k_n_components), 0, 0, 1, 1}, {&__pyx_n_s_n_samples, __pyx_k_n_samples, sizeof(__pyx_k_n_samples), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_projected_grad, __pyx_k_projected_grad, sizeof(__pyx_k_projected_grad), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_sample_idx, __pyx_k_sample_idx, sizeof(__pyx_k_sample_idx), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_solve_h, __pyx_k_solve_h, sizeof(__pyx_k_solve_h), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, 
sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violation, __pyx_k_violation, sizeof(__pyx_k_violation), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* 
"View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); 
PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
* */ __pyx_tuple__19 = PyTuple_Pack(14, __pyx_n_s_h, __pyx_n_s_Wtv, __pyx_n_s_WtW, __pyx_n_s_permutation, __pyx_n_s_kappa, __pyx_n_s_n_components, __pyx_n_s_n_samples, __pyx_n_s_violation, __pyx_n_s_grad, __pyx_n_s_projected_grad, __pyx_n_s_hessian, __pyx_n_s_sample_idx, __pyx_n_s_component_idx_1, __pyx_n_s_component_idx_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_n_s_solve_h, 18, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 18, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) 
__PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = 
&__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations 
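/* nmf_pgd does not cimport variables or functions from other Cython modules, so this
   import hook (and the function-import hook below) only opens and closes the refnanny
   context; both are emitted unconditionally by the code generator. */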
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initnmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initnmf_pgd(void) #else __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_nmf_pgd(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; 
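/* Reaching this point with a different module object means the extension is being
   executed a second time in the same process; single-copy Cython module state
   (static globals, cached constants) cannot support re-initialisation. */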
PyErr_SetString(PyExc_RuntimeError, "Module 'nmf_pgd' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("nmf_pgd", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_gensim__models__nmf_pgd) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "gensim.models.nmf_pgd")) { if (unlikely(PyDict_SetItemString(modules, "gensim.models.nmf_pgd", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h, NULL, __pyx_n_s_gensim_models_nmf_pgd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_solve_h, __pyx_t_1) < 0) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "gensim/models/nmf_pgd.pyx":1 * # Author: Timofey Yefimov # <<<<<<<<<<<<<< * * # cython: cdivision=True */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 
* * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = 
capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init gensim.models.nmf_pgd", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init gensim.models.nmf_pgd"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; 
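/* All three curexc_* slots are re-pointed before the old exception objects are
   released, so destructor code triggered by the decrefs below can never observe a
   half-updated thread state. */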
tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } 
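/* By now 'type' is guaranteed to be the exception class and 'value' a matching,
   normalised instance with any explicit __cause__ attached, so the pair can be
   handed straight to the interpreter. */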
PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if 
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if 
(_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = 
PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * 
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
16 : 8); } case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees.
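For illustration (annotation, values assumed for a typical 64-bit ABI): struct { double x; char c; } occupies 16 bytes there, so sizeof(__Pyx_pad_double) - sizeof(double) yields 8, the alignment boundary the struct tail must be padded to; the exact figures depend on the target compiler's ABI, which is precisely why sizeof arithmetic is used here instead of hard-coded constants.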
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; without advancing ts the continue would loop forever */ default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && 
!b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto 
fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = 
from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned 
long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if 
(8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if 
(8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return 
PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * 
PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x 
== Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
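/* Illustrative aside (not part of the generated module): the __Pyx_pad_*
 * structs near the top of this file implement the classic struct-padding
 * alignment probe -- on typical ABIs, sizeof(struct {T x; char c;}) - sizeof(T)
 * equals the alignment of T, because the trailing char forces the struct to be
 * padded out to the next multiple of alignof(T). A minimal standalone sketch,
 * guarded by the hypothetical PYX_PAD_ALIGN_DEMO macro so it never affects a
 * real build:
 */
#ifdef PYX_PAD_ALIGN_DEMO
#include <stdio.h>
typedef struct { double x; char c; } pad_double_demo;
typedef struct { short  x; char c; } pad_short_demo;
int main(void) {
    /* prints the ABI alignment of double and short on this platform */
    printf("align(double) = %zu\n", sizeof(pad_double_demo) - sizeof(double));
    printf("align(short)  = %zu\n", sizeof(pad_short_demo)  - sizeof(short));
    return 0;
}
#endif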
transpose.c
#include "main.h" mat_rv transpose_csr_csc_nothreading(csr matrix){ mat_rv rv; int buf = matrix.cols; matrix.cols = matrix.rows; matrix.rows = buf; rv = csc_to_mat_nothreading(matrix); //process time is 0 as treating csr as csc results in an already transposed matrix rv.t_process.tv_sec = 0; rv.t_process.tv_nsec = 0; return rv; } mat_rv transpose_csr_csc(csr matrix, int thread_count){ mat_rv rv; int buf = matrix.cols; matrix.cols = matrix.rows; matrix.rows = buf; rv = csc_to_mat(matrix, thread_count); //process time is 0 as treating csr as csc results in an already transposed matrix rv.t_process.tv_sec = 0; rv.t_process.tv_nsec = 0; return rv; } mat_rv transpose_coo_nothreading(coo matrix) { mat_rv rv; struct timespec start, end; get_utc_time(&start); for(int i = 0; i < matrix.length; ++i){ int temp_i = matrix.elems[i].i; matrix.elems[i].i = matrix.elems[i].j; matrix.elems[i].j = temp_i; } get_utc_time(&end); int temp_cols = matrix.cols; matrix.cols = matrix.rows; matrix.rows = temp_cols; //matrix isn't in ordered form but isn't important for conversion rv = coo_to_mat_nothreading(matrix); rv.t_process = time_delta(end, start); return rv; } //no speedup currently mat_rv transpose_coo(coo matrix, int thread_count) { mat_rv rv; struct timespec start, end; int i; get_utc_time(&start); #pragma omp parallel for num_threads(thread_count) private(i) shared(matrix) for(i = 0; i < matrix.length; ++i){ int temp_i = matrix.elems[i].i; matrix.elems[i].i = matrix.elems[i].j; matrix.elems[i].j = temp_i; } get_utc_time(&end); int temp_cols = matrix.cols; matrix.cols = matrix.rows; matrix.rows = temp_cols; //matrix isn't in ordered form but isn't important for conversion rv = coo_to_mat(matrix, thread_count); rv.t_process = time_delta(end, start); return rv; } mat_rv transpose_csr_nothreading(csr matrix) { mat_rv rv; struct timespec start, end; get_utc_time(&start); csr result; result.rows = matrix.cols; result.cols = matrix.rows; result.type = matrix.type; result.num_vals = matrix.num_vals; if(!(result.ia = (int*)calloc(result.rows + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ja = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //creating a new array to contain lengths so don't have to search for //next nonzero element and make complexity n^2 int *row_lens; if(!(row_lens = (int*)calloc(result.rows, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating col_lens\n"); exit(EXIT_FAILURE); } //store lengths of rows in result.ia for(int i = 0; i < matrix.num_vals; ++i) result.ia[matrix.ja[i] + 1]++; //calculate ia for(int i = 0; i < result.rows + 1; ++i) result.ia[i + 1] += result.ia[i]; //cannot parallelise below for(int i = 0; i < matrix.rows; ++i){ for(int j = matrix.ia[i]; j < matrix.ia[i + 1]; ++j){ if(result.type == MAT_INT) result.nnz.i[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]] = matrix.nnz.i[j]; else result.nnz.f[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]] = matrix.nnz.f[j]; 
result.ja[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]++] = i; } } free(row_lens); get_utc_time(&end); rv = csr_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_csr(result); return rv; } mat_rv transpose_csr(csr matrix, int thread_count) { mat_rv rv; struct timespec start, end; get_utc_time(&start); csr result; result.rows = matrix.cols; result.cols = matrix.rows; result.type = matrix.type; result.num_vals = matrix.num_vals; if(!(result.ia = (int*)calloc(result.rows + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ja = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //creating a new array to contain lengths so don't have to search for //next nonzero element and make complexity n^2 int *row_lens; if(!(row_lens = (int*)calloc(result.rows, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating col_lens\n"); exit(EXIT_FAILURE); } //store lengths of rows in result.ia for(int i = 0; i < matrix.num_vals; ++i) result.ia[matrix.ja[i] + 1]++; //calculate ia for(int i = 0; i < result.rows + 1; ++i) result.ia[i + 1] += result.ia[i]; //cannot parallelise below for(int i = 0; i < matrix.rows; ++i){ for(int j = matrix.ia[i]; j < matrix.ia[i + 1]; ++j){ if(result.type == MAT_INT) result.nnz.i[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]] = matrix.nnz.i[j]; else result.nnz.f[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]] = matrix.nnz.f[j]; result.ja[result.ia[matrix.ja[j]] + row_lens[matrix.ja[j]]++] = i; } } free(row_lens); get_utc_time(&end); rv = csr_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_csr(result); return rv; } mat_rv transpose_csc_nothreading(csc matrix) { mat_rv rv; struct timespec start, end; get_utc_time(&start); csc result; result.rows = matrix.cols; result.cols = matrix.rows; result.type = matrix.type; result.num_vals = matrix.num_vals; if(!(result.ia = (int*)calloc(result.cols + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ja = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //creating a new array to contain lengths so don't have to search for //next nonzero element and make complexity n^2 int *col_lens; if(!(col_lens = (int*)calloc(result.cols, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating row_lens\n"); exit(EXIT_FAILURE); } //store lengths of columns in result.ia for(int i = 0; i < matrix.num_vals; ++i) result.ia[matrix.ja[i] + 1]++; //calculate ia for(int 
i = 0; i < result.cols + 1; ++i) result.ia[i + 1] += result.ia[i]; for(int i = 0; i < matrix.cols; ++i){ for(int j = matrix.ia[i]; j < matrix.ia[i + 1]; ++j){ if(result.type == MAT_INT) result.nnz.i[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]] = matrix.nnz.i[j]; else result.nnz.f[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]] = matrix.nnz.f[j]; result.ja[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]++] = i; } } free(col_lens); get_utc_time(&end); rv = csc_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_csc(result); return rv; } mat_rv transpose_csc(csc matrix, int thread_count) { mat_rv rv; struct timespec start, end; get_utc_time(&start); csc result; result.rows = matrix.cols; result.cols = matrix.rows; result.type = matrix.type; result.num_vals = matrix.num_vals; if(!(result.ia = (int*)calloc(result.cols + 1, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ja = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(result.num_vals*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(result.num_vals*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } //creating a new array to contain lengths so don't have to search for //next nonzero element and make complexity n^2 int *col_lens; if(!(col_lens = (int*)calloc(result.cols, sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating row_lens\n"); exit(EXIT_FAILURE); } //store lengths of columns in result.ia for(int i = 0; i < matrix.num_vals; ++i) result.ia[matrix.ja[i] + 1]++; //calculate ia for(int i = 0; i < result.cols + 1; ++i) result.ia[i + 1] += result.ia[i]; //cannot parallelise below for(int i = 0; i < matrix.cols; ++i){ for(int j = matrix.ia[i]; j < matrix.ia[i + 1]; ++j){ if(result.type == MAT_INT) result.nnz.i[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]] = matrix.nnz.i[j]; else result.nnz.f[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]] = matrix.nnz.f[j]; result.ja[result.ia[matrix.ja[j]] + col_lens[matrix.ja[j]]++] = i; } } free(col_lens); get_utc_time(&end); rv = csc_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_csc(result); return rv; } mat_rv transpose(OPERATIONARGS *args) { mat_rv rv; //default CSR -> CSC switch(args->format){ case FORM_DEFAULT:{ struct timespec construct; struct timespec fileio_timer; csr matrix = read_csr(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = transpose_csr_csc_nothreading(matrix); else rv = transpose_csr_csc(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_csr(matrix); return rv; break; } case COO:{ struct timespec construct; struct timespec fileio_timer; coo matrix = read_coo(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = transpose_coo_nothreading(matrix); else rv = transpose_coo(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_coo(matrix); return rv; break; } case CSR:{ struct timespec construct; struct timespec fileio_timer; csr matrix = read_csr(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = 
transpose_csr_nothreading(matrix); else rv = transpose_csr(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_csr(matrix); return rv; break; } case CSC:{ struct timespec construct; struct timespec fileio_timer; csc matrix = read_csc(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = transpose_csc_nothreading(matrix); else rv = transpose_csc(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_csc(matrix); return rv; break; } default: fprintf(stderr, "format not implemented\n"); exit(EXIT_FAILURE); break; } //execution should never reach here rv.error = ERR_NOT_SET; return rv; }
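/* Illustrative aside (hypothetical standalone demo, guarded so it never
 * interferes with the build): the CSR/CSC transposes above are a counting
 * sort over the column-index array -- histogram the column indices into ia,
 * turn the histogram into row offsets with a prefix sum, then scatter each
 * value into its row using a per-row fill counter. For the 2x3 matrix
 *     [1 0 2]
 *     [0 3 0]
 * (ia = {0,2,3}, ja = {0,2,1}), the transposed 3x2 ia comes out as {0,1,2,3}.
 */
#ifdef TRANSPOSE_IA_DEMO
#include <stdio.h>
int main(void) {
    int ja[3] = {0, 2, 1};   /* column indices of the example above */
    int rows_t = 3;          /* rows of the transpose = cols of the input */
    int ia_t[4] = {0};
    for (int i = 0; i < 3; ++i)
        ia_t[ja[i] + 1]++;                 /* histogram of column indices */
    for (int i = 0; i < rows_t; ++i)
        ia_t[i + 1] += ia_t[i];            /* prefix sum -> row offsets */
    for (int i = 0; i <= rows_t; ++i)
        printf("%d ", ia_t[i]);            /* prints: 0 1 2 3 */
    printf("\n");
    return 0;
}
#endif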
constoprim.c
/*
  A simple 2D hydro code
  (C) Romain Teyssier : CEA/IRFU            -- original F90 code
  (C) Pierre-Francois Lavallee : IDRIS      -- original F90 code
  (C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
  This software is governed by the CeCILL license under French law and
  abiding by the rules of distribution of free software. You can use,
  modify and/ or redistribute the software under the terms of the CeCILL
  license as circulated by CEA, CNRS and INRIA at the following URL
  "http://www.cecill.info".

  As a counterpart to the access to the source code and rights to copy,
  modify and redistribute granted by the license, users are provided only
  with a limited warranty and the software's author, the holder of the
  economic rights, and the successive licensors have only limited
  liability.

  In this respect, the user's attention is drawn to the risks associated
  with loading, using, modifying and/or developing or reproducing the
  software by the user in light of its specific status of free software,
  that may mean that it is complicated to manipulate, and that also
  therefore means that it is reserved for developers and experienced
  professionals having in-depth computer knowledge. Users are therefore
  encouraged to load and test the software's suitability as regards their
  requirements in conditions enabling the security of their systems and/or
  data to be ensured and, more generally, to use and operate it in the
  same conditions as regards security.

  The fact that you are presently reading this means that you have had
  knowledge of the CeCILL license and that you accept its terms.
*/
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <stdio.h>

#ifndef HMPP
#include "parametres.h"
#include "constoprim.h"
#include "perfcnt.h"
#include "utils.h"

void constoprim(const int n,
                const int Hnxyt,
                const int Hnvar,
                const real_t Hsmallr,
                const int slices, const int Hstep,
                real_t u[Hnvar][Hstep][Hnxyt],
                real_t q[Hnvar][Hstep][Hnxyt],
                real_t e[Hstep][Hnxyt]) {
  int ijmin, ijmax, IN, i, s;
  real_t eken;
  // const int nxyt = Hnxyt;
  WHERE("constoprim");
  ijmin = 0;
  ijmax = n;

#pragma omp parallel for private(i, s, eken), shared(q,e) COLLAPSE
  for (s = 0; s < slices; s++) {
    for (i = ijmin; i < ijmax; i++) {
      real_t qid = MAX(u[ID][s][i], Hsmallr);
      q[ID][s][i] = qid;

      real_t qiu = u[IU][s][i] / qid;
      real_t qiv = u[IV][s][i] / qid;
      q[IU][s][i] = qiu;
      q[IV][s][i] = qiv;
      eken = half * (Square(qiu) + Square(qiv));
      real_t qip = u[IP][s][i] / qid - eken;
      q[IP][s][i] = qip;
      e[s][i] = qip;
    }
  }
  {
    int nops = slices * ((ijmax) - (ijmin));
    FLOPS(5 * nops, 3 * nops, 1 * nops, 0 * nops);
  }

  if (Hnvar > IP) {
    for (IN = IP + 1; IN < Hnvar; IN++) {
      for (s = 0; s < slices; s++) {
        for (i = ijmin; i < ijmax; i++) {
          // passive scalars are stored as density * scalar;
          // convert to primitive form by dividing by the density
          q[IN][s][i] = u[IN][s][i] / q[ID][s][i];
        }
      }
    }
  }
}                               // constoprim

#undef IHVW
#endif
//EOF
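/* Worked example (illustrative only, appended after the file): for a single
 * cell with conserved state rho = 2, rho*u = 4, rho*v = 0, E = 11 (per unit
 * volume), the loop above yields u = 4/2 = 2, v = 0, kinetic energy
 * eken = 0.5*(2^2 + 0^2) = 2, and internal energy e = E/rho - eken = 3.5.
 * The guarded sketch below reproduces the arithmetic with plain doubles
 * (half/Square are macros from parametres.h; local equivalents are used here,
 * and the guard macro CONSTOPRIM_DEMO is hypothetical):
 */
#ifdef CONSTOPRIM_DEMO
#include <stdio.h>
int main(void) {
    double rho = 2.0, rhou = 4.0, rhov = 0.0, E = 11.0;
    double u = rhou / rho, v = rhov / rho;
    double eken = 0.5 * (u * u + v * v);   /* kinetic energy per unit mass */
    double e = E / rho - eken;             /* internal energy per unit mass */
    printf("u=%g v=%g e=%g\n", u, v, e);   /* prints: u=2 v=0 e=3.5 */
    return 0;
}
#endif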
GB_unop__frexpe_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__frexpe_fp64_fp64 // op(A') function: GB_unop_tran__frexpe_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = GB_frexpe (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_frexpe (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = GB_frexpe (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FREXPE || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__frexpe_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = GB_frexpe (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = GB_frexpe (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__frexpe_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
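GB_frexpe itself is defined elsewhere in SuiteSparse (GB_math.h) and is not shown in this generated file. Consistent with the name and the FP64-to-FP64 signature above, it extracts the exponent part of C's frexp; a stand-in with that behavior (an assumption for illustration, not the library's exact definition) could look like:

#include <math.h>

/* Assumed behavior: return e such that x == f * 2^e with 0.5 <= |f| < 1,
 * widened to double because the operator's output type is FP64. */
static double GB_frexpe_sketch(double x)
{
    int e = 0;
    (void) frexp(x, &e);   /* discard the mantissa, keep the exponent */
    return (double) e;
}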
ast-dump-openmp-cancel.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test() { #pragma omp parallel { #pragma omp cancel parallel } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-cancel.c:3:1, line:8:1> line:3:6 test 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:8:1> // CHECK-NEXT: `-OMPParallelDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:7:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:7:3> // CHECK-NEXT: | `-OMPCancelDirective {{.*}} <line:6:1, col:28> openmp_standalone_directive // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-cancel.c:4:1) *const restrict'
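The test above only checks the AST shape of #pragma omp cancel. At run time, cancellation is cooperative: it takes effect at cancellation points, and only when the OMP_CANCELLATION environment variable enables it. A minimal runnable illustration:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    /* Run with OMP_CANCELLATION=true for the cancel to take effect. */
    #pragma omp parallel
    {
        if (omp_get_thread_num() == 0) {
            #pragma omp cancel parallel        /* request cancellation */
        }
        #pragma omp cancellation point parallel
        printf("thread %d was not cancelled\n", omp_get_thread_num());
    }
    return 0;
}

With cancellation enabled, threads that reach the cancellation point after the request skip straight to the end of the parallel region instead of printing.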
main.c
/************************************************************** The program reads a BMP image file and creates a new image that is the negative or desaturated version of the input file. **************************************************************/ #include "qdbmp.h" #include <stdio.h> #include <omp.h> #include <stdlib.h> typedef enum {desaturate, negative} ImgProcessing; /* Creates a negative or desaturated image of the input bitmap file */ int main() { const char* inFile = "okanagan.bmp"; const char* outFile = "okanagan_processed.bmp"; const ImgProcessing processingType = desaturate; //or negative UCHAR r, g, b; UINT width, height; UINT x, y; BMP* bmp; /* Read an image file */ bmp = BMP_ReadFile(inFile); BMP_CHECK_ERROR(stdout, -1); /* Get image's dimensions */ width = BMP_GetWidth(bmp); height = BMP_GetHeight(bmp); /* Input number of threads */ int n_threads; printf("Input number of threads:\n"); scanf("%d", &n_threads); if (height%n_threads != 0) { fprintf(stderr, "warning: the thread count does not divide the image height evenly; trailing rows will not be processed.\n"); } int seg_length = height / n_threads; double t = omp_get_wtime(); /* Iterate through all the image's pixels */ #pragma omp parallel num_threads(n_threads) private(r,g,b,x,y) { int start, end, tid = omp_get_thread_num(); start = tid * seg_length; end = (tid + 1) * seg_length; for (x = 0; x < width; ++x) { for (y = start; y < end; ++y) { /* Get pixel's RGB values */ BMP_GetPixelRGB(bmp, x, y, &r, &g, &b); /* Write new RGB values */ if(processingType == negative) BMP_SetPixelRGB(bmp, x, y, 255 - r, 255 - g, 255 - b); else if(processingType == desaturate){ UCHAR gray = r * 0.3 + g * 0.59 + b * 0.11; BMP_SetPixelRGB(bmp, x, y, gray, gray, gray); } } } } /* calculate and print processing time*/ t = 1000 * (omp_get_wtime() - t); printf("Finished image processing in %.1f ms.\n", t); /* Save result */ BMP_WriteFile(bmp, outFile); BMP_CHECK_ERROR(stdout, -2); /* Free all memory allocated for the image */ BMP_Free(bmp); return 0; } /* num_threads exe_time 2 241.8ms 4 247.9ms 8 191.6ms 16 147.4ms 24 132.2ms */
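The manual block partition above drops the last height % n_threads rows whenever the height is not divisible by the thread count, hence the warning. A remainder-aware split that covers every row (a hypothetical helper with the same tid/n_threads/height roles as above):

/* Row range [start, end) for thread tid when splitting `height` rows over
 * n_threads threads; the first (height % n_threads) threads get one extra
 * row, so every row is covered. */
static void row_range(int tid, int n_threads, int height, int *start, int *end)
{
    int base = height / n_threads;
    int rem  = height % n_threads;
    *start = tid * base + (tid < rem ? tid : rem);
    *end   = *start + base + (tid < rem ? 1 : 0);
}

An `#pragma omp for schedule(static)` over y would produce an equivalent partition without any manual bookkeeping.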
GB_binop__div_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint32) // A*D function (colscale): GB (_AxD__div_uint32) // D*A function (rowscale): GB (_DxB__div_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint32) // C=scalar+B GB (_bind1st__div_uint32) // C=scalar+B' GB (_bind1st_tran__div_uint32) // C=A+scalar GB (_bind2nd__div_uint32) // C=A'+scalar GB (_bind2nd_tran__div_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ; \ } GrB_Info GB (_bind1st_tran__div_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ; \ } GrB_Info GB (_bind2nd_tran__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
convolution_winograd_transform_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; v4f32 _v5_25 = __msa_fill_w_f32(5.25f); v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f); v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f); v4f32 _v0_25 = __msa_fill_w_f32(0.25f); v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f); v4f32 _v0_5 = __msa_fill_w_f32(0.5f); v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02)); v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05)); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp7m, tmp[7][m], 0); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, _r03); v4f32 _tmp1m = __msa_fadd_w(_tmp12a, 
_tmp12b); v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05); v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05); v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); __msa_st_w((v4i32)_tmp6m, tmp[6][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02)); v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05)); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, _tmp05), _vm4_25, _tmp03); v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b); v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05); v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b); v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05); v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); __msa_st_w((v4i32)_r0tm6, r0_tm_6, 0); __msa_st_w((v4i32)_r0tm7, r0_tm_7, 0); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } static void conv3x3s1_winograd63_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[6][8] = { // 
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); v4f32 _bias0 = biasptr ? (v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[6][8][4]; v4f32 _v32 = __msa_fill_w_f32(32.f); v4f32 _v16 = __msa_fill_w_f32(16.f); v4f32 _v8 = __msa_fill_w_f32(8.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row<float>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0); v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0); v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6); v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c); v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c); v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c); v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { v4f32 _tmp00 = 
(v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0); v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02); v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02); v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04); v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04); v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06); v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06); v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c))); v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c)); v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c)); __msa_st_w((v4i32)_out00, output0, 0); __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0); __msa_st_w((v4i32)_out04, output0 + 4 * 4, 0); v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c)); v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c)); v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b))); __msa_st_w((v4i32)_out01, output0 + 4, 0); __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0); __msa_st_w((v4i32)_out05, output0 + 4 * 5, 0); output0 += outw * 4; } } } } } static void conv3x3s1_winograd43_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[6][6][4]; v4f32 _vm5 = __msa_fill_w_f32(-5.f); v4f32 _vm4 = __msa_fill_w_f32(-4.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _vm2 = __msa_fill_w_f32(-2.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02); v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02)); v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, 
__msa_fsub_w(_r01, _r03)); v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03)); v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02); v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02)); v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02)); v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } static void conv3x3s1_winograd43_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); v4f32 _bias0 = biasptr ? 
(v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[4][6][4]; v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v8 = __msa_fill_w_f32(8.f); // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row<float>(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b); v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b); v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b); v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02); v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02); v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04); v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04); v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b)); v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b)); v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b)); v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b)); __msa_st_w((v4i32)_out00, output0, 0); __msa_st_w((v4i32)_out01, output0 + 4, 0); __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0); __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0); output0 += outw * 4; } } } } }
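Each transform above applies the same one-dimensional kernel twice, first over rows into the tmp buffer and then over the columns of tmp. For the F(6x6, 3x3) input transform, a scalar version of that 8-point kernel, transcribed directly from the itm[8][8] formulas in the comments (single channel, no MSA intrinsics):

/* One pass of the winograd63 input transform over 8 samples. */
static void winograd63_input_row(const float r[8], float d[8])
{
    d[0] = r[0] - r[6] + (r[4] - r[2]) * 5.25f;
    d[7] = r[7] - r[1] + (r[3] - r[5]) * 5.25f;
    float t12a = r[2] + r[6] - r[4] * 4.25f;
    float t12b = r[1] + r[5] - r[3] * 4.25f;
    d[1] = t12a + t12b;
    d[2] = t12a - t12b;
    float t34a = r[6] + r[2] * 0.25f - r[4] * 1.25f;
    float t34b = r[1] * 0.5f - r[3] * 2.5f + r[5] * 2.f;
    d[3] = t34a + t34b;
    d[4] = t34a - t34b;
    float t56a = r[6] + (r[2] - r[4] * 1.25f) * 4.f;
    float t56b = r[1] * 2.f - r[3] * 2.5f + r[5] * 0.5f;
    d[5] = t56a + t56b;
    d[6] = t56a - t56b;
}

The vectorized code computes exactly these expressions, but on 4-lane v4f32 vectors so that four packed channels are transformed at once.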
GB_unaryop__lnot_fp32_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_bool // op(A') function: GB_tran__lnot_fp32_bool // C type: float // A type: bool // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_bool ( float *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
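With the GB_GETA / GB_CASTING / GB_OP macros expanded, the apply kernel reduces to a plain cast-then-operate loop; a standalone equivalent of what the generated function computes:

#include <stdbool.h>
#include <stdint.h>

/* What GB_unop__lnot_fp32_bool computes once its macros are expanded:
 * Cx [p] = !((float) Ax [p] != 0), stored as 0.0f or 1.0f. */
void lnot_fp32_bool(float *Cx, const bool *Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        float x = (float) Ax[p];   /* cast bool -> float */
        Cx[p] = !(x != 0);         /* logical NOT */
    }
}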
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #if defined(_OPENMP) #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc < 5) { fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]); return EXIT_FAILURE; } Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize both buffers over all cells, so the boundary planes read by the stencil are defined // srand(42); for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { for (k = 0; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); A[1][i][j][k] = A[0][i][j][k]; } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (commented out: freeing here caused performance degradation) /* for(i=0; i<Nz;
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
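The measured region is deliberately serial; the #pragma scop / endscop markers delimit the nest that PLUTO-style source-to-source tools tile and parallelize using the tile_size list. For reference, within one time step every cell update is independent thanks to the (t+1)%2 / t%2 double buffering, so a direct OpenMP variant of the same nest is straightforward (j and k must be private because they are declared at function scope):

for (t = 0; t < Nt - 1; t++) {
    #pragma omp parallel for private(j, k)
    for (i = 1; i < Nz - 1; i++)
        for (j = 1; j < Ny - 1; j++)
            for (k = 1; k < Nx - 1; k++)
                A[(t + 1) % 2][i][j][k] = alpha * A[t % 2][i][j][k]
                    + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                            + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                            + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
}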
swap.h
/* * Author: Salvatore Mandra (salvatore.mandra@nasa.gov) * * Copyright © 2021, United States Government, as represented by the * Administrator of the National Aeronautics and Space Administration. All * rights reserved. * * The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed * under the Apache License, Version 2.0 (the "License"); you may not use this * file except in compliance with the License. You may obtain a copy of the * License at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ #ifndef HYBRIDQ__SWAP_H #define HYBRIDQ__SWAP_H #include "pack.h" #include "utils.h" namespace hybridq::swap { template <typename Positions, std::size_t size = array_size_v<Positions>> constexpr inline auto swap(std::size_t x, Positions&& pos) { std::size_t y{0}; for (std::size_t i = 0; i < size; ++i) y ^= ((x >> i) & 1uL) << pos[i]; return y; } template <typename pack_type, typename Positions, std::size_t... I> constexpr inline void _swap(pack_type&& pack, Positions&& pos, std::index_sequence<I...>) { pack = remove_pcvr_t<pack_type>{pack[swap(I, pos)]...}; } template <typename pack_type, typename Positions, std::size_t size = pack_size_v<pack_type>> constexpr inline void swap(pack_type&& pack, Positions&& pos) { _swap(pack, pos, std::make_index_sequence<size>{}); } template <typename float_type, typename Positions, std::size_t swap_size = array_size_v<Positions>> int swap_array(float_type* array, Positions&& pos, const std::size_t size) { // Reinterpret auto* _array = reinterpret_cast< typename hybridq::__pack__<float_type, 1uL << swap_size>::value_type*>( array); #pragma omp parallel for for (std::size_t i = 0; i < (size >> swap_size); ++i) swap(_array[i], pos); return 0; } template <typename float_type, typename index_type> int swap_array(float_type* array, index_type* pos, const std::size_t size, const std::size_t n_pos) { // Get U_Size const std::size_t swap_size = 1uL << n_pos; // Compute offset std::size_t offset[n_pos]; for (std::size_t i = 0; i < n_pos; ++i) { offset[i] = 0; for (std::size_t j = i + 1; j < n_pos; ++j) offset[i] += (pos[j] < pos[i]); } // Allocate buffers float_type _array[swap_size]; std::size_t _swap_pos[swap_size]; for (std::size_t x = 0; x < swap_size; ++x) { std::size_t y{0}; for (std::size_t i = 0; i < n_pos; ++i) y ^= ((x >> i) & 1uL) << pos[i]; _swap_pos[x] = y; } #pragma omp parallel for private(_array) for (std::size_t i = 0; i < size >> n_pos; ++i) { // Load buffer for (std::size_t j = 0; j < swap_size; ++j) _array[j] = array[_swap_pos[j] ^ (i << n_pos)]; // Dump buffer for (std::size_t j = 0; j < swap_size; ++j) array[j ^ (i << n_pos)] = _array[j]; } return 0; } } // namespace hybridq::swap #endif
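The core of hybridq::swap::swap is a bit-gather permutation: bit i of the index x is scattered to bit pos[i] of the result, which is how the simulator permutes state-vector amplitudes. The same operation in plain C, with a tiny demo:

#include <stddef.h>
#include <stdio.h>

/* Scatter bit i of x to bit pos[i] of the result, as in swap() above. */
static size_t bit_scatter(size_t x, const size_t *pos, size_t n)
{
    size_t y = 0;
    for (size_t i = 0; i < n; ++i)
        y ^= ((x >> i) & 1u) << pos[i];
    return y;
}

int main(void)
{
    size_t pos[2] = {2, 0};            /* bit 0 -> bit 2, bit 1 -> bit 0 */
    for (size_t x = 0; x < 4; ++x)     /* maps 0,1,2,3 to 0,4,1,5 */
        printf("%zu -> %zu\n", x, bit_scatter(x, pos, 2));
    return 0;
}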
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overload resolution, so we postpone its computation until it is actually /// needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
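  // PragmaStack models MSVC's '#pragma X(push/pop/set)' family generically.
  // A rough standalone sketch of the push/pop/set semantics (illustrative
  // only; the real behavior lives in PragmaStack<ValueType>::Act, and labeled
  // slots and source locations are omitted here):
  //
  // \code
  //   #include <vector>
  //
  //   template <typename T> struct MiniPragmaStack {
  //     std::vector<T> Stack;
  //     T DefaultValue, CurrentValue;
  //
  //     explicit MiniPragmaStack(T Default)
  //         : DefaultValue(Default), CurrentValue(Default) {}
  //
  //     void set(T V) { CurrentValue = V; }            // #pragma X(value)
  //     void push() { Stack.push_back(CurrentValue); } // #pragma X(push)
  //     void pop() {                                   // #pragma X(pop)
  //       if (!Stack.empty()) {
  //         CurrentValue = Stack.back();
  //         Stack.pop_back();
  //       }
  //     }
  //     void reset() { CurrentValue = DefaultValue; }  // #pragma X()
  //   };
  // \endcode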
  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression. The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a reference
  /// to a variable (constant) that may or may not be odr-used in this Expr, and
  /// we won't know until all lvalue-to-rvalue and discarded value conversions
  /// have been applied to all subexpressions of the enclosing full expression.
  /// This is cleared at the end of each full expression.
  using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of translation unit
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

  /// All the function redeclarations seen during a class definition that had
  /// their exception spec checks delayed, plus the prior declaration they
  /// should be checked against. Except during error recovery, the new decl
  /// should always be a friend declaration, as that's the only valid way to
  /// redeclare a special member before its class is complete.
  SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
    DelayedEquivalentExceptionSpecChecks;

  typedef llvm::MapVector<const FunctionDecl *,
                          std::unique_ptr<LateParsedTemplate>>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  void *OpaqueParser;

  void SetLateTemplateParser(LateTemplateParserCB *LTP,
                             LateTemplateParserCleanupCB *LTPCleanup,
                             void *P) {
    LateTemplateParser = LTP;
    LateTemplateParserCleanup = LTPCleanup;
    OpaqueParser = P;
  }

  class DelayedDiagnostics;

  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };
  typedef DelayedDiagnosticsState ParsingDeclState;
  typedef DelayedDiagnosticsState ProcessingContextState;

  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// The current pool of diagnostics into which delayed
    /// diagnostics should go.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
    void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

    /// Determines whether diagnostics should be delayed.
    bool shouldDelayDiagnostics() { return CurPool != nullptr; }

    /// Returns the current delayed-diagnostics pool.
    sema::DelayedDiagnosticPool *getCurrentPool() const {
      return CurPool;
    }

    /// Enter a new scope. Access and deprecation diagnostics will be
    /// collected in this pool.
    DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = &pool;
      return state;
    }

    /// Leave a delayed-diagnostic state that was previously pushed.
    /// Do not emit any of the diagnostics. This is performed as part
    /// of the bookkeeping of popping a pool "properly".
    void popWithoutEmitting(DelayedDiagnosticsState state) {
      CurPool = state.SavedPool;
    }

    /// Enter a new scope where access and deprecation diagnostics are
    /// not delayed.
    DelayedDiagnosticsState pushUndelayed() {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = nullptr;
      return state;
    }

    /// Undo a previous pushUndelayed().
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;

  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    DeclContext *SavedContext;
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
    }

    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  bool isConstantEvaluatedOverride;

  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }

  /// RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    ~SynthesizedFunctionScope() {
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };

  /// WeakUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma weak before declared. rare. may alias another
  /// identifier, declared or undeclared
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;
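  // The ExpressionEvaluationContext enum that follows distinguishes, among
  // other things, operands that are never evaluated at run time. Two of its
  // kinds in ordinary C++ terms (illustrative snippets only):
  //
  // \code
  //   int f();
  //   unsigned long N = sizeof(f()); // 'Unevaluated': f() is never called;
  //                                  // only the type of f() matters here.
  //   int Y = f();                   // 'PotentiallyEvaluated': code is
  //                                  // generated to call f() at run time.
  // \endcode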
  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields.
  bool AllowAbstractFieldReference;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// Whether we are in a decltype expression.
    bool IsDecltype;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    std::unique_ptr<MangleNumberingContext> MangleNumbering;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
          ExprContext(ExprContext) {}

    /// Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);
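  // A hypothetical sketch of how deeply recursive Sema code is expected to
  // use runWithSufficientStackSpace, based on the documentation above
  // (illustrative only; the visitor, 'Loc', and the recursion are made up):
  //
  // \code
  //   void visitTypeRecursively(Sema &S, SourceLocation Loc, QualType T,
  //                             unsigned Depth) {
  //     // Each level re-checks the remaining stack and, if it is running
  //     // low, warns once and runs the continuation on a fresh segment.
  //     S.runWithSufficientStackSpace(Loc, [&] {
  //       // ... inspect T, then recurse into its component types ...
  //       (void)Depth;
  //     });
  //   }
  // \endcode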
  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
    // in that case anyway.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string
  getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,

    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,

    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();
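  // The Diag entry points above are the standard way Sema code reports
  // problems: arguments are streamed into the returned builder, and the
  // diagnostic is emitted when the builder is destroyed. A hypothetical
  // usage sketch (the helper and its names are made up for illustration;
  // the diagnostic ID is an existing Sema diagnostic):
  //
  // \code
  //   ExprResult checkWidget(Sema &S, Expr *E, QualType T) {
  //     if (T->isVoidType()) {
  //       // Streams the type and the offending range into the diagnostic;
  //       // it is emitted when the temporary builder goes out of scope.
  //       S.Diag(E->getBeginLoc(), diag::err_typecheck_decl_incomplete_type)
  //           << T << E->getSourceRange();
  //       return ExprError();
  //     }
  //     return E;
  //   }
  // \endcode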
  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const;

  void setFunctionHasBranchIntoScope();
  void setFunctionHasBranchProtectedScope();
  void setFunctionHasIndirectGoto();

  void PushCompoundScope(bool IsStmtExpr);
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// Get the innermost lambda enclosing the current location, if any. This
  /// looks through intervening non-lambda scopes such as local functions and
  /// blocks.
  sema::LambdaScopeInfo *getEnclosingLambda() const;

  /// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if should find the top-most
  /// lambda scope info ignoring all inner capturing scopes that are not
  /// lambda scopes.
  sema::LambdaScopeInfo *
  getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

  /// Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);
  QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  /// Same as above, but constructs the AddressSpace index if not provided.
  QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  /// Build a function type.
  ///
  /// This routine checks the function type according to C++ rules and
  /// under the assumption that the result type and parameter types have
  /// just been instantiated from a template. It therefore duplicates
  /// some of the behavior of GetTypeForDeclarator, but in a much
  /// simpler form that is only suitable for this narrow use case.
  ///
  /// \param T The return type of the function.
  ///
  /// \param ParamTypes The parameter types of the function. This array
  /// will be modified to account for adjustments to the types of the
  /// function parameters.
  ///
  /// \param Loc The location of the entity whose type involves this
  /// function type or, if there is no such entity, the location of the
  /// type that will have function type.
  ///
  /// \param Entity The name of the entity that involves the function
  /// type, if known.
  ///
  /// \param EPI Extra information about the function type. Usually this will
  /// be taken from an existing function with the same prototype.
  ///
  /// \returns A suitable function type, if there are no errors. The
  /// unqualified type will always be a FunctionProtoType.
  /// Otherwise, returns a NULL type.
  QualType BuildFunctionType(QualType T,
                             MutableArrayRef<QualType> ParamTypes,
                             SourceLocation Loc, DeclarationName Entity,
                             const FunctionProtoType::ExtProtoInfo &EPI);

  QualType BuildMemberPointerType(QualType T, QualType Class,
                                  SourceLocation Loc,
                                  DeclarationName Entity);
  QualType BuildBlockPointerType(QualType T,
                                 SourceLocation Loc, DeclarationName Entity);
  QualType BuildParenType(QualType T);
  QualType BuildAtomicType(QualType T, SourceLocation Loc);
  QualType BuildReadPipeType(QualType T, SourceLocation Loc);
  QualType BuildWritePipeType(QualType T, SourceLocation Loc);

  TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
  TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

  /// Package the given type and TSI into a ParsedType.
  ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
  DeclarationNameInfo GetNameForDeclarator(Declarator &D);
  DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
  static QualType GetTypeFromParser(ParsedType Ty,
                                    TypeSourceInfo **TInfo = nullptr);
  CanThrowResult canThrow(const Expr *E);
  const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                                const FunctionProtoType *FPT);
  void UpdateExceptionSpec(FunctionDecl *FD,
                           const FunctionProtoType::ExceptionSpecInfo &ESI);
  bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
  bool CheckDistantExceptionSpec(QualType T);
  bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
  bool CheckEquivalentExceptionSpec(
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool CheckEquivalentExceptionSpec(
      const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
      const FunctionProtoType *Old, SourceLocation OldLoc,
      const FunctionProtoType *New, SourceLocation NewLoc);
  bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
  bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                                const PartialDiagnostic &NestedDiagID,
                                const PartialDiagnostic &NoteID,
                                const PartialDiagnostic &NoThrowDiagID,
                                const FunctionProtoType *Superset,
                                SourceLocation SuperLoc,
                                const FunctionProtoType *Subset,
                                SourceLocation SubLoc);
  bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                               const PartialDiagnostic &NoteID,
                               const FunctionProtoType *Target,
                               SourceLocation TargetLoc,
                               const FunctionProtoType *Source,
                               SourceLocation SourceLoc);

  TypeResult ActOnTypeName(Scope *S, Declarator &D);

  /// The parser has parsed the context-sensitive type 'instancetype'
  /// in an Objective-C message declaration. Return the appropriate type.
  ParsedType ActOnObjCInstanceType(SourceLocation Loc);

  /// Abstract class used to diagnose incomplete types.
  struct TypeDiagnoser {
    TypeDiagnoser() {}

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char * getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              std::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
      assert(DiagID != 0 && "no diagnostic for type diagnoser");
    }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, std::index_sequence_for<Ts...>());
      DB << T;
    }
  };
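  // BoundTypeDiagnoser::emit above uses the classic index_sequence trick to
  // stream a tuple of bound arguments, in order, into a builder. A standalone
  // sketch of the same pattern (illustrative only; 'Printer' and 'emitAll'
  // are hypothetical):
  //
  // \code
  //   #include <iostream>
  //   #include <tuple>
  //   #include <utility>
  //
  //   struct Printer {
  //     template <typename T> const Printer &operator<<(const T &V) const {
  //       std::cout << V << ' ';
  //       return *this;
  //     }
  //   };
  //
  //   template <typename... Ts, std::size_t... Is>
  //   void emitAll(const Printer &P, const std::tuple<Ts...> &Args,
  //                std::index_sequence<Is...>) {
  //     // Expands to P << get<0>(Args) << get<1>(Args) << ... via a dummy
  //     // array, which forces left-to-right evaluation.
  //     bool Dummy[] = {false, ((P << std::get<Is>(Args)), false)...};
  //     (void)Dummy;
  //   }
  //
  //   // emitAll(Printer{}, std::make_tuple(1, "two", 3.0),
  //   //         std::index_sequence_for<int, const char *, double>());
  // \endcode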
private:
  /// Methods for marking which expressions involve dereferencing a pointer
  /// marked with the 'noderef' attribute. Expressions are checked bottom up as
  /// they are parsed, meaning that a noderef pointer may not be accessed. For
  /// example, in `&*p` where `p` is a noderef pointer, we will first parse the
  /// `*p`, but need to check that `address of` is called on it. This requires
  /// keeping a container of all pending expressions and checking whether their
  /// addresses are eventually taken.
  void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
  void CheckAddressOfNoDeref(const Expr *E);
  void CheckMemberAccessOfNoDeref(const MemberExpr *E);

  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser *Diagnoser);

  struct ModuleScope {
    SourceLocation BeginLoc;
    clang::Module *Module = nullptr;
    bool ModuleInterface = false;
    bool ImplicitGlobalModuleFragment = false;
    VisibleModuleSet OuterVisibleModules;
  };
  /// The modules we're currently parsing.
  llvm::SmallVector<ModuleScope, 16> ModuleScopes;

  /// Namespace definitions that we will export when they finish.
  llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

  /// Get the module whose scope we are currently within.
  Module *getCurrentModule() const {
    return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
  }

  VisibleModuleSet VisibleModules;

public:
  /// Get the module owning an entity.
  Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

  /// Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND);

  bool isModuleVisible(const Module *M, bool ModulePrivate = false);

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    return !D->isHidden() || isVisibleSlow(D);
  }

  /// Determine whether any declaration of an entity is visible.
  bool hasVisibleDeclaration(const NamedDecl *D,
                             llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
    return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
  }
  bool hasVisibleDeclarationSlow(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules);

  bool hasVisibleMergedDefinition(NamedDecl *Def);
  bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

  /// Determine if \p D and \p Suggested have a structurally compatible
  /// layout as described in C11 6.2.7/1.
  bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool
  hasVisibleDefaultArgument(const NamedDecl *D,
                            llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is an explicit
  /// specialization declaration for a specialization of a template. (For a
  /// member specialization, use hasVisibleMemberSpecialization.)
  bool hasVisibleExplicitSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is a member
  /// specialization declaration (as opposed to an instantiated declaration).
  bool hasVisibleMemberSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if \p A and \p B are equivalent internal linkage declarations
  /// from different modules, and thus an ambiguity error can be downgraded to
  /// an extension warning.
  bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                              const NamedDecl *B);
  void diagnoseEquivalentInternalLinkageDeclarations(
      SourceLocation Loc, const NamedDecl *D,
      ArrayRef<const NamedDecl *> Equiv);

  bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

  bool isCompleteType(SourceLocation Loc, QualType T) {
    return !RequireCompleteTypeImpl(Loc, T, nullptr);
  }
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  void completeExprArrayBound(Expr *E);
  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T,
                             TagDecl *OwnedTagDecl = nullptr);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  struct SkipBodyInfo {
    SkipBodyInfo()
        : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
          New(nullptr) {}
    bool ShouldSkip;
    bool CheckSameAsPrevious;
    NamedDecl *Previous;
    NamedDecl *New;
  };

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false, bool HasTrailingDot = false,
                         ParsedType ObjectType = nullptr,
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         bool IsClassTemplateDeductionContext = true,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II,
                               SourceLocation IILoc,
                               Scope *S,
                               CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool IsTemplateName = false);

  /// Attempt to behave like MSVC in situations where lookup of an unqualified
  /// type name has failed in a dependent context. In these situations, we
  /// automatically form a DependentTypeName that will retry lookup in a related
  /// scope during instantiation.
  ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                      SourceLocation NameLoc,
                                      bool IsTemplateTypeArg);

  /// Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate,
    NC_UndeclaredTemplate,
  };

  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification UndeclaredTemplate(TemplateName Name) {
      NameClassification Result(NC_UndeclaredTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      case NC_UndeclaredTemplate:
        return TNK_Undeclared_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };

  /// Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                  IdentifierInfo *&Name, SourceLocation NameLoc,
                                  const Token &NextToken,
                                  bool IsAddressOfOperand,
                                  CorrectionCandidateCallback *CCC = nullptr);

  /// Describes the detailed kind of a template name. Used in diagnostics.
  enum class TemplateNameKindForDiagnostics {
    ClassTemplate,
    FunctionTemplate,
    VarTemplate,
    AliasTemplate,
    TemplateTemplateParam,
    Concept,
    DependentTemplate
  };
  TemplateNameKindForDiagnostics
  getTemplateNameKindForDiagnostics(TemplateName Name);

  /// Determine whether it's plausible that E was intended to be a
  /// template-name.
  bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
    if (!getLangOpts().CPlusPlus || E.isInvalid())
      return false;
    Dependent = false;
    if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
      return !DRE->hasExplicitTemplateArgs();
    if (auto *ME = dyn_cast<MemberExpr>(E.get()))
      return !ME->hasExplicitTemplateArgs();
    Dependent = true;
    if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
      return !DSDRE->hasExplicitTemplateArgs();
    if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
      return !DSME->hasExplicitTemplateArgs();
    // Any additional cases recognized here should also be handled by
    // diagnoseExprIntendedAsTemplateName.
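    // For instance (illustrative only), when the parser has an expression
    // 'x' followed by '<' and must decide whether '<' begins a template
    // argument list, it can combine this predicate with the diagnostic
    // helper declared just below. The variable names here are hypothetical:
    //
    // \code
    //   bool Dependent = false;
    //   if (S.mightBeIntendedToBeTemplateName(Result, Dependent))
    //     S.diagnoseExprIntendedAsTemplateName(CurScope, Result, LessLoc,
    //                                          GreaterLoc);
    // \endcode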
  void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                          SourceLocation Less,
                                          SourceLocation Greater);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name, SourceLocation Loc,
                                    bool IsTemplateId);
  void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                                 SourceLocation FallbackLoc,
                                 SourceLocation ConstQualLoc = SourceLocation(),
                                 SourceLocation VolatileQualLoc = SourceLocation(),
                                 SourceLocation RestrictQualLoc = SourceLocation(),
                                 SourceLocation AtomicQualLoc = SourceLocation(),
                                 SourceLocation UnalignedQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                    const LookupResult &R);
  NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
  void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                   const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);

  /// Warn if 'E', which is an expression that is about to be modified, refers
  /// to a shadowing declaration.
  void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

  void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
  /// Map of current shadowing declarations to shadowed declarations. Warn if
  /// it looks like the user is trying to modify the shadowing declaration.
  llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl *ActOnTypedefDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl *ActOnTypedefNameDecl(Scope *S, DeclContext *DC,
                                  TypedefNameDecl *D, LookupResult &Previous,
                                  bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope,
                                     ArrayRef<BindingDecl *> Bindings = None);
  NamedDecl *
  ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                               MultiTemplateParamsArg TemplateParamLists);
  // Returns true if the variable declaration is a redeclaration.
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                     Expr *Init);
  void CheckCompleteVariableDeclaration(VarDecl *VD);
  void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl *ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
  enum class CheckConstexprKind {
    /// Diagnose issues that are non-constant or that are extensions.
    Diagnose,
    /// Identify whether this function satisfies the formal rules for constexpr
    /// functions in the current language mode (with no extensions).
    CheckValid
  };

  bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                        CheckConstexprKind Kind);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                                SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                                SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration.
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
  void ActOnUninitializedDecl(Decl *dcl);
  void ActOnInitializerError(Decl *Dcl);

  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void CheckStaticLocalForDllExport(VarDecl *VD);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }
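  // Illustrative flow (a sketch; the parser-side helpers named here are
  // assumptions, not declarations from this header):
  //
  //   SkipBodyInfo SkipBody;
  //   Decl *FnD = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
  //                                               TemplateParams, &SkipBody);
  //   if (SkipBody.ShouldSkip) {
  //     SkipFunctionBody();                      // hypothetical parser helper
  //     Actions.ActOnSkippedFunctionBody(FnD);
  //   } else {
  //     Stmt *Body = ParseFunctionBody();        // hypothetical parser helper
  //     Actions.ActOnFinishFunctionBody(FnD, Body);
  //   }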
  /// Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about
  /// emitting code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineFunctionDef(FunctionDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

  /// Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                              QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path, bool IsFirstDecl);

  /// The parser has processed a global-module-fragment declaration that begins
  /// the definition of the global module fragment of the current module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);
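  // Illustrative mapping from source syntax to the callbacks above (based on
  // the documentation comments; the exact call sites live in the parser):
  //
  //   export module M;  ->  ActOnModuleDecl(..., ModuleDeclKind::Interface, ...)
  //   module M;         ->  ActOnModuleDecl(..., ModuleDeclKind::Implementation, ...)
  //   module;           ->  ActOnGlobalModuleFragmentDecl(ModuleLoc)
  //   module :private;  ->  ActOnPrivateModuleFragmentDecl(ModuleLoc, PrivateLoc)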
  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  /// could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation,
                                   RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);
  /// Common ways to introduce type names without a tag for use in diagnostics.
  /// Keep in sync with err_tag_reference_non_tag.
  enum NonTagKind {
    NTK_NonStruct,
    NTK_NonClass,
    NTK_NonUnion,
    NTK_NonEnum,
    NTK_Typedef,
    NTK_TypeAlias,
    NTK_Template,
    NTK_TypeAliasTemplate,
    NTK_TemplateTemplateArgument,
  };

  /// Given a non-tag type declaration, returns an enum useful for indicating
  /// what kind of non-tag type this is.
  NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                    bool isDefinition, SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
                 SourceLocation NameLoc, const ParsedAttributesView &Attr,
                 AccessSpecifier AS, SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
                 bool &IsDependent, SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, bool IsTemplateParamOrArg,
                 SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS, IdentifierInfo *Name,
                                SourceLocation NameLoc,
                                const ParsedAttributesView &Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                               const CXXScopeSpec &SS, IdentifierInfo *Name,
                               SourceLocation TagLoc, SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);
  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle, AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart, Declarator &D,
                                   Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   const ParsedAttr &MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo, RecordDecl *Record,
                            SourceLocation Loc, bool Mutable,
                            Expr *BitfieldWidth, InClassInitStyle InitStyle,
                            SourceLocation TSSL, AccessSpecifier AS,
                            NamedDecl *PrevDecl, Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

  enum TrivialABIHandling {
    /// The triviality of a method unaffected by "trivial_abi".
    TAH_IgnoreTrivialABI,
    /// The triviality of a method affected by "trivial_abi".
    TAH_ConsiderTrivialABI
  };

  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                  Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);
  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, const ParsedAttributesView &AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  /// Perform ODR-like check for C/ObjC when merging tag types from modules.
  /// Unlike C++, we actually parse the body and reject or error out in case
  /// of a structural mismatch.
  bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                                SkipBodyInfo &SkipBody);

  typedef void *SkippedDefinitionContext;

  /// Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceRange BraceRange);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, bool IsFixed,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          const ParsedAttributesView &Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                     Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                     const ParsedAttributesView &Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);
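  // Illustrative pairing (a sketch of the intended call discipline, inferred
  // from the comments above): each "enter" callback is balanced by a matching
  // "finish"/"exit" callback, e.g.
  //
  //   ActOnTagStartDefinition(S, TagD);
  //   ... parse member declarations ...
  //   ActOnTagFinishDefinition(S, TagD, BraceRange); // or ActOnTagDefinitionError()
  //
  //   EnterDeclaratorContext(S, DC);
  //   ... perform lookups in the declarator's scope ...
  //   ExitDeclaratorContext(S);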
  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope *S, Decl *D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  /// the enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };
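  // Illustrative reading of AMK_Override (an assumption drawn from the enum
  // documentation above, not from checked-in tests): an override may *weaken*
  // the availability constraint of the method it overrides, e.g.
  //
  //   struct Base {
  //     virtual void f() __attribute__((availability(macos, introduced=10.10)));
  //   };
  //   struct Derived : Base {
  //     virtual void f() __attribute__((availability(macos, introduced=10.8)));
  //   };
  //
  // whereas an override that *tightens* the constraint would be diagnosed.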
  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return the merged attribute if a new one was
  /// created, or null otherwise.
  AvailabilityAttr *mergeAvailabilityAttr(
      NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
      VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
      bool IsUnavailable, StringRef Message, bool IsStrict,
      StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
      unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *
  mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                          TypeVisibilityAttr::VisibilityType Vis,
                          unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                          unsigned AttrSpellingListIndex, StringRef Uuid);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  NoSpeculativeLoadHardeningAttr *
  mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                      const NoSpeculativeLoadHardeningAttr &AL);
  SpeculativeLoadHardeningAttr *
  mergeSpeculativeLoadHardeningAttr(Decl *D,
                                    const SpeculativeLoadHardeningAttr &AL);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                                const InternalLinkageAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
  void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
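  // Illustrative example (a sketch): given a redeclaration such as
  //
  //   __attribute__((section("mydata"))) extern int x;
  //   int x;   // inherits the SectionAttr from the first declaration
  //
  // mergeDeclAttributes() copies mergeable attributes from the old declaration
  // onto the new one, and the merge* helpers above diagnose conflicts (for
  // example, two declarations naming different sections).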
  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                             const LookupResult &OldDecls, NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                  bool ConsiderCudaAttrs = true);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions, bool AllowExplicit,
                        bool InOverloadResolution, bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution, QualType &ConvertedType,
                           bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType &ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType,
                                  QualType ToType);

  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                              CXXCastPath &BasePath, bool IgnoreBaseAccess,
                              bool Diagnose = true);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType, bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind, CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsFunctionConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType, Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformAggregateInitializationForOverloadResolution(
      const InitializedEntity &Entity, InitListExpr *From);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);
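  // Illustrative entry point (a sketch; InitializedEntity::InitializeVariable
  // is assumed from the initialization machinery): initializing a variable
  // from a parsed initializer typically goes through
  //
  //   InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
  //   ExprResult Res = PerformCopyInitialization(Entity, EqualLoc, InitExpr);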
  /// Check that the lifetime of the initializer (and its subobjects) is
  /// sufficient for initializing the entity, and perform lifetime extension
  /// (when permitted) if not.
  void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
    CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
    CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value, CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);

  /// Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

    /// Emits a diagnostic when the expression has incomplete class type.
    virtual SemaDiagnosticBuilder
    diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder
    diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T,
                         QualType ConvTy) = 0;

    /// Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder
    noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder
    diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder
    noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder
    diagnoseConversion(Sema &S, SourceLocation Loc, QualType T,
                       QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                        bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                          QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                                 QualType T) = 0;
  };
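  // Illustrative sketch (an assumption, modeled on diagnosers used elsewhere
  // in Sema): clients subclass the diagnoser and map each virtual to a
  // concrete diagnostic, e.g. for a switch condition:
  //
  //   class SwitchConvertDiagnoser : public ICEConvertDiagnoser {
  //   public:
  //     SwitchConvertDiagnoser()
  //         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/true,
  //                               /*Suppress=*/false,
  //                               /*SuppressConversion=*/false) {}
  //     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
  //                                          QualType T) override {
  //       return S.Diag(Loc, diag::err_typecheck_statement_requires_integer) << T;
  //     }
  //     // ... remaining overrides elided ...
  //   };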
  /// Perform a contextual implicit conversion.
  ExprResult
  PerformContextualImplicitConversion(SourceLocation Loc, Expr *FromE,
                                      ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false,
                             bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
      bool PartialOverloading = false, bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
  bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                    ArrayRef<QualType> ParamTypes,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet,
                                    ConversionSequenceList &Conversions,
                                    bool SuppressUserConversions,
                                    CXXRecordDecl *ActingContext = nullptr,
                                    QualType ObjectType = QualType(),
                                    Expr::Classification ObjectClassification = {});
  void AddConversionCandidate(CXXConversionDecl *Conversion,
                              DeclAccessPair FoundDecl,
                              CXXRecordDecl *ActingContext, Expr *From,
                              QualType ToType,
                              OverloadCandidateSet &CandidateSet,
                              bool AllowObjCConversionOnExplicit,
                              bool AllowExplicit,
                              bool AllowResultConversion = true);
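  // Illustrative workflow (a sketch; OverloadCandidateSet::BestViableFunction
  // and OR_Success come from the overload-resolution machinery and are
  // assumptions here, as is the follow-up helper):
  //
  //   OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  //   for (NamedDecl *D : Fns)
  //     AddOverloadCandidate(cast<FunctionDecl>(D), FoundDecl, Args, CandidateSet);
  //   OverloadCandidateSet::iterator Best;
  //   if (CandidateSet.BestViableFunction(*this, Loc, Best) == OR_Success)
  //     CallViable(Best->Function);              // hypothetical follow-up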
  void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                      DeclAccessPair FoundDecl,
                                      CXXRecordDecl *ActingContext, Expr *From,
                                      QualType ToType,
                                      OverloadCandidateSet &CandidateSet,
                                      bool AllowObjCConversionOnExplicit,
                                      bool AllowExplicit,
                                      bool AllowResultConversion = true);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto, Expr *Object,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet);
  void AddArgumentDependentLookupCandidates(
      DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args,
      TemplateArgumentListInfo *ExplicitTemplateArgs,
      OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate.
  void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                             QualType DestType = QualType(),
                             bool TakingAddress = false);

  // Emit as a series of 'note's all templates and non-templates identified by
  // the expression Expr.
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  /// Find the failed Boolean condition within a given Boolean
  /// constant expression, and describe it with a string.
  std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// non-ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-dependent diagnose_if attributes should be checked each time a
  /// function is used as a direct callee of a function call.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                           const Expr *ThisArg,
                                           ArrayRef<const Expr *> Args,
                                           SourceLocation Loc);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-independent diagnose_if attributes should be checked on every
  /// use of a function.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                             SourceLocation Loc);
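  // Illustrative attribute spellings these routines check (standard Clang
  // attribute syntax, shown here only as an example):
  //
  //   int isdigit(int c)
  //       __attribute__((enable_if(c >= -1 && c <= 255, "chars only")));
  //   int abs(int x)
  //       __attribute__((diagnose_if(x >= 0, "redundant abs() call", "warning")));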
  /// Returns whether the given function's address can be taken or not,
  /// optionally emitting a diagnostic if the address can't be taken.
  ///
  /// Returns false if taking the address of the function is illegal.
  bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                         bool Complain = false,
                                         SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->  [Return]
  // NonFunctionType --> NonFunctionType
  // R (A) --> R(A)
  // R (*)(A) --> R (A)
  // R (&)(A) --> R (A)
  // R (S::*)(A) --> R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                     bool Complain, DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                              DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
      bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection = true,
                                     bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns, Expr *input,
                                     bool RequiresADL = true);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc, Expr *Base,
                                                Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);
  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria is specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,

    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,

    /// Label name lookup.
    LookupLabel,

    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,

    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,

    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,

    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,

    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,

    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,

    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,

    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,
    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,

    /// Look up the name of an OpenMP user-defined reduction operation.
    LookupOMPReductionName,

    /// Look up the name of an OpenMP user-defined mapper.
    LookupOMPMapperName,

    /// Look up any declaration with any name.
    LookupAnyName
  };

  /// Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,

    /// The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists and is visible.
    ForVisibleRedeclaration,

    /// The lookup results will be used for redeclaration of a name
    /// with external linkage; non-visible lookup results with external
    /// linkage may also be found.
    ForExternalRedeclaration
  };

  RedeclarationKind forRedeclarationInCurContext() {
    // A declaration with an owning module for linkage can never link against
    // anything that is not visible. We don't need to check linkage here; if
    // the context has internal linkage, redeclaration lookup won't find things
    // from other TUs, and we can't safely compute linkage yet in general.
    if (cast<Decl>(CurContext)
            ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
      return ForVisibleRedeclaration;
    return ForExternalRedeclaration;
  }

  /// The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// The lookup resulted in an error.
    LOLR_Error,
    /// The lookup found no match but no diagnostic was issued.
    LOLR_ErrorNoDiagnostic,
    /// The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };

  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;

    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);
  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc, LookupNameKind NameKind,
                              RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw,
                                                    bool AllowTemplate,
                                                    bool AllowStringTemplate,
                                                    bool DiagnoseMissing);
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool IncludeDependentBases = false,
                          bool LoadExternal = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };
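  // Illustrative flow (a sketch; the concrete callback type and diagnostic ID
  // are assumptions for the example):
  //
  //   SomeCorrectionCandidateCallback CCC;   // hypothetical CCC subclass
  //   if (TypoCorrection Corrected =
  //           CorrectTypo(DeclarationNameInfo(Name, NameLoc),
  //                       LookupOrdinaryName, S, &SS, CCC, CTK_ErrorRecovery))
  //     diagnoseTypo(Corrected,
  //                  PDiag(diag::err_undeclared_var_use_suggest) << Name);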
  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               CorrectionCandidateCallback &CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult CorrectDelayedTyposInExpr(
      Expr *E, VarDecl *InitDecl = nullptr,
      llvm::function_ref<ExprResult(Expr *)> Filter =
          [](Expr *E) -> ExprResult { return E; });

  ExprResult CorrectDelayedTyposInExpr(
      Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult CorrectDelayedTyposInExpr(
      ExprResult ER, VarDecl *InitDecl = nullptr,
      llvm::function_ref<ExprResult(Expr *)> Filter =
          [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult CorrectDelayedTyposInExpr(
      ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

  void FindAssociatedClassesAndNamespaces(
      SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
      AssociatedNamespaceSet &AssociatedNamespaces,
      AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                                 bool ForRedeclaration, SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  void ProcessDeclAttributeDelayed(Decl *D,
                                   const ParsedAttributesView &AttrList);
  void ProcessDeclAttributeList(Scope *S, Decl *D,
                                const ParsedAttributesView &AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const ParsedAttributesView &AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                   const ParsedAttributesView &Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);
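  // Illustrative Objective-C source pattern flagged by the check above
  // (a sketch, not from the original header):
  //
  //   @interface I
  //   - (void)m;
  //   @end
  //   @implementation I
  //   @end  // warning: method definition for 'm' not found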
  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property
  /// setter/getter and its property has a backing ivar, returns this ivar;
  /// otherwise, returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                               const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                    SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD,
                    Selector GetterSel,
                    SourceLocation GetterNameLoc,
                    Selector SetterSel,
                    SourceLocation SetterNameLoc,
                    const bool isReadWrite,
                    unsigned &Attributes,
                    const unsigned AttributesAsWritten,
                    QualType T,
                    TypeSourceInfo *TSI,
                    tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy { MMS_loose, MMS_strict };

  /// MatchTwoMethodDeclarations - Checks whether two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);
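  // Illustrative mismatch caught by the matching routines above and below
  // (a sketch, not from the original header): the interface and the
  // implementation disagree on a parameter type.
  //
  //   @interface I
  //   - (void)setX:(double)v;
  //   @end
  //   @implementation I
  //   - (void)setX:(float)v {}  // warning: conflicting parameter types
  //   @end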
  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// Returns instance or factory methods in the global method pool for
  /// the given selector. It checks the desired kind first; if none is found,
  /// and parameter checkTheOther is set, it then checks the other kind. If no
  /// such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                          SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                          bool InstanceFirst,
                                          bool CheckTheOther,
                                          const ObjCObjectType *TypeBound = nullptr);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass,
                                      SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// Returns the method which best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);
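  // A minimal sketch of the global method pool API (illustrative, not from
  // the original header; assumes a Sema &S and an ObjCMethodDecl *MD):
  //
  //   S.AddInstanceMethodToGlobalPool(MD);
  //   ObjCMethodDecl *Found = S.LookupInstanceMethodInGlobalPool(
  //       MD->getSelector(), MD->getSourceRange());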
  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                   QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
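/// For example (illustrative):
/// \code
///   void *p = 0;  // C++11: -Wzero-as-null-pointer-constant suggests nullptr
/// \endcode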
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.

  bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
  bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false,
                         bool AvoidPartialAvailabilityChecks = false,
                         ObjCInterfaceDecl *ClassReceiver = nullptr);
  void NoteDeletedFunction(FunctionDecl *FD);
  void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced. These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense). There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  //
  // MightBeOdrUse indicates whether the use could possibly be an odr-use, and
  // should usually be true. This only needs to be set to false if the lack of
  // odr-use cannot be determined from the current context (for instance,
  // because the name denotes a virtual function and was written without an
  // explicit nested-name-specifier).
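  //
  // For example (illustrative): given 'struct S { static constexpr int N = 4; };',
  // reading 'S::N' in 'return S::N;' is not an odr-use, because the
  // lvalue-to-rvalue conversion immediately yields a constant, so no
  // out-of-line definition of N is required; taking '&S::N' is an odr-use.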
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks whether
  /// the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind, SourceLocation EllipsisLoc,
                          bool BuildAndDiagnose, QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
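  ///
  /// For example (illustrative):
  /// \code
  ///   int answer();
  ///   int x = answer;  // error; recovery suggests the call 'answer()'
  /// \endcode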
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr *> Arg,
                                   SourceLocation RParenLoc,
                                   Expr *Config = nullptr,
                                   bool IsExecConfig = false,
                                   ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
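  // For example (illustrative GNU statement expression):
  //   int x = ({ int t = f(); t + 1; });  // 't + 1' is the result expression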
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
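/// For example (illustrative):
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   int4 i = __builtin_convertvector(f, int4);  // f is a float4
/// \endcode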
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
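  // Illustrative source pattern for the std::initializer_list helpers above
  // (a sketch, not from the original header): for 'V v{1, 2, 3};' below,
  // overload resolution selects the initializer-list constructor, and
  // isStdInitializerList() reports Element == int for its parameter type.
  //
  //   struct V { V(std::initializer_list<int>); };
  //   V v{1, 2, 3};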
  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType, NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType, NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);

ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc,
                             Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc,
                             Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                             tok::TokenKind Kind,
                             TypeSourceInfo *Ty,
                             Expr *E,
                             SourceRange AngleBrackets,
                             SourceRange Parens);

ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                   ExprResult Operand,
                                   SourceLocation RParenLoc);

ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                   Expr *Operand, SourceLocation RParenLoc);

ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand,
                          SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
                          SourceLocation TypeidLoc,
                          Expr *Operand,
                          SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                          SourceLocation LParenLoc, bool isType,
                          void *TyOrExpr,
                          SourceLocation RParenLoc);
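// Illustrative sketch (plain C++, not part of this interface): the two
// operand forms the typeid entry points above distinguish. The snippet
// compiles as written:
//
//   #include <typeinfo>
//   struct Base { virtual ~Base() = default; };
//   struct Derived : Base {};
//
//   inline const std::type_info &T1 = typeid(int);  // type operand form
//   inline const std::type_info &f(Base &B) {
//     return typeid(B);  // polymorphic glvalue: evaluated at run time
//   }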
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \return true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
    bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr,
    bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                               SourceLocation AtLoc, SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
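// Illustrative sketch (plain C++, not part of this interface): the source
// forms that drive the allocation-function lookup scopes above:
//
//   #include <new>
//   struct W {
//     static void *operator new(std::size_t);        // class-scope allocator
//     static void operator delete(void *) noexcept;
//   };
//   inline void g(void *Buf) {
//     W *A = new W;           // class-scope lookup applies (AFS_Class/AFS_Both)
//     W *B = ::new W;         // '::new' restricts lookup to the global scope
//     int *C = new (Buf) int; // placement args select ::operator new(size_t, void*)
//   }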
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             AllocationFunctionScope NewScope,
                             AllocationFunctionScope DeleteScope,
                             QualType AllocType, bool IsArray,
                             bool &PassAlignment, MultiExprArg PlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete,
                             bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                     ArrayRef<QualType> Params);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name, FunctionDecl* &Operator,
                              bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            bool Overaligned,
                                            DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                    CXXRecordDecl *RD);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                          bool UseGlobal, bool ArrayForm,
                          Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                          bool IsDelete, bool CallCanBeVirtual,
                          bool WarnOnNonAbstractTypes,
                          SourceLocation DtorLoc);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               ParsedType LhsTy,
                               Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo,
                               Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S,
                                        Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                          SourceRange AnnotationRange,
                                          CXXScopeSpec &SS);

bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                       TypeSourceInfo *Info,
                                       bool KnownDependent,
                                       LambdaCaptureDefault CaptureDefault);

/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
                      TypeSourceInfo *MethodType, SourceLocation EndLoc,
                      ArrayRef<ParmVarDecl *> Params,
                      ConstexprSpecKind ConstexprKind,
                      Optional<std::pair<unsigned, Decl *>> Mangling = None);

/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                      CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc,
                      bool ExplicitParams,
                      bool ExplicitResultType,
                      bool Mutable);

/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);
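// Illustrative sketch (plain C++14, not part of this interface): the
// init-capture forms the routines above analyze. Each init-capture introduces
// a new closure member whose type is deduced from its initializer:
//
//   #include <memory>
//   inline int demoInitCaptures() {
//     int X = 10;
//     auto F = [Y = X + 1](int Z) { return Y + Z; };            // copy init
//     auto G = [P = std::make_unique<int>(42)] { return *P; };  // move-only
//     return F(0) + G();
//   }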
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                  Declarator &ParamInfo, Scope *CurScope);

/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
                      bool IsInstantiation = false);

/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
                           Scope *CurScope);

/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);

/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
                                 const sema::Capture &From);

/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);

/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
                            SourceLocation ImplicitCaptureLoc,
                            bool IsOpenMPMapping = false);

/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
                           sema::LambdaScopeInfo *LSI);

/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);

/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
    SourceLocation CurrentLoc, CXXConversionDecl *Conv);
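// Illustrative sketch (plain C++, not part of this interface): the
// conversion the DefineImplicitLambdaTo*Conversion routines materialize.
// A capture-less lambda converts to an ordinary function pointer (and,
// under the Blocks extension, to a block pointer):
//
//   inline int apply(int (*Fn)(int), int V) { return Fn(V); }
//   inline int demoConversion() {
//     auto L = [](int X) { return X + 1; };
//     int (*Fp)(int) = L;   // uses the closure's conversion function
//     return apply(Fp, 41);
//   }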
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                  ArrayRef<Expr *> Strings);

ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
                                bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                        Expr *IndexExpr,
                                        ObjCMethodDecl *getterMethod,
                                        ObjCMethodDecl *setterMethod);

ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
                            MutableArrayRef<ObjCDictionaryElement> Elements);

ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                     TypeSourceInfo *EncodedTypeInfo,
                                     SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                  CXXConversionDecl *Method,
                                  bool HadMultipleCandidates);

ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                     SourceLocation EncodeLoc,
                                     SourceLocation LParenLoc,
                                     ParsedType Ty,
                                     SourceLocation RParenLoc);

/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
                                       SourceLocation AtLoc,
                                       SourceLocation SelLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation RParenLoc,
                                       bool WarnMultipleSelectors);

/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                       SourceLocation AtLoc,
                                       SourceLocation ProtoLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ProtoIdLoc,
                                       SourceLocation RParenLoc);

//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
                                     SourceLocation ExternLoc,
                                     Expr *LangStr,
                                     SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
                                      Decl *LinkageSpec,
                                      SourceLocation RBraceLoc);

//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                        const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                          SourceLocation ColonLoc,
                          const ParsedAttributesView &Attrs);

NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
                                    Declarator &D,
                                    MultiTemplateParamsArg TemplateParameterLists,
                                    Expr *BitfieldWidth,
                                    const VirtSpecifiers &VS,
                                    InClassInitStyle InitStyle);

void ActOnStartCXXInClassMemberInitializer();
void
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
/// The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc,
                                   bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();

//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
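// Illustrative sketch (plain C++, not part of this interface): the
// base-specifier forms handled below. Each base in a base-clause yields one
// CXXBaseSpecifier recording its virtual-ness, access, and type:
//
//   struct Base {};
//   struct Mixin {};
//   struct Derived : public virtual Base,  // Virtual=true,  Access=public
//                    private Mixin {};     // Virtual=false, Access=private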
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                     SourceRange SpecifierRange,
                                     bool Virtual, AccessSpecifier Access,
                                     TypeSourceInfo *TInfo,
                                     SourceLocation EllipsisLoc);

BaseResult ActOnBaseSpecifier(Decl *classdecl,
                              SourceRange SpecifierRange,
                              ParsedAttributes &Attrs,
                              bool Virtual, AccessSpecifier Access,
                              ParsedType basetype,
                              SourceLocation BaseLoc,
                              SourceLocation EllipsisLoc);

bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                          MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
                         MutableArrayRef<CXXBaseSpecifier *> Bases);

bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
                   CXXBasePaths &Paths);

// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  SourceLocation Loc, SourceRange Range,
                                  CXXCastPath *BasePath = nullptr,
                                  bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  unsigned InaccessibleBaseID,
                                  unsigned AmbigiousBaseConvID,
                                  SourceLocation Loc, SourceRange Range,
                                  DeclarationName Name,
                                  CXXCastPath *BasePath,
                                  bool IgnoreAccess = false);

std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc,
                                      CXXScopeSpec &SS, IdentifierInfo *Name,
                                      SourceLocation NameLoc,
                                      const ParsedAttributesView &Attr);

DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument>
                                          &Converted,
                                        bool &HasDefaultArg);

/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
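///
/// As an illustrative sketch only (the names 'MetaFun', 'apply', 'T1', and
/// 'T2' are hypothetical, echoing the example above), the construct handled
/// here appears in source such as:
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// struct Apply {
///   typedef typename MetaFun::template apply<T1, T2>::type type;
/// };
/// \endcode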
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
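///
/// For illustration only (hypothetical code): in
/// \code
/// template<typename ...Ts> void f(typename Ts::type ...args);
/// \endcode
/// the nested-name-specifier 'Ts::' names the parameter pack 'Ts', which
/// this routine would collect.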
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// to determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure.
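///
/// As a hedged illustration (hypothetical code):
/// \code
/// template<typename T> void f(T, T);
/// // A call f(1, 'a') deduces T = int from the first argument but
/// // T = char from the second; deduction reports this failure as a
/// // result code (see TDK_Inconsistent below) rather than diagnosing
/// // it directly.
/// \endcode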
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
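///
/// Illustrative sketch (hypothetical code):
/// \code
/// template<typename T> void f(const T &);
/// // For a call f(42), the original parameter type 'const T &' and the
/// // original argument type 'int' are recorded so that the conversion
/// // can be rechecked once T has been deduced and substituted.
/// \endcode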
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
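///
/// A hedged illustration (hypothetical types): the implicit guides enable
/// class template argument deduction such as
/// \code
/// template<typename T> struct Wrapper { Wrapper(T); };
/// Wrapper w(42); // deduces Wrapper<int> via an implicitly declared guide
/// \endcode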
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that caused /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth, we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
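///
/// A minimal usage sketch (assuming a Sema reference 'SemaRef'; illustrative
/// rather than normative):
/// \code
/// {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 1);
///   // Substitutions in this scope use the second element of each pack.
/// } // The previous substitution index is restored on scope exit.
/// \endcode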
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
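///
/// For illustration (hypothetical code): given
/// \code
/// template<typename T> auto f(T t) -> decltype(t.size()); // (1)
/// template<typename T> void f(...);                       // (2)
/// \endcode
/// a call f(0) makes the substitution into (1) fail; in a SFINAE context
/// that failure silently removes (1) from consideration instead of
/// producing a hard error.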
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
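///
/// A minimal usage sketch ('EPI' and 'NumParams' are hypothetical, and the
/// ExtParameterInfo accessor shown is an assumption about its interface):
/// \code
/// Sema::ExtParameterInfoBuilder ParamInfos;
/// ParamInfos.set(0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
/// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode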
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
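///
/// Illustrative sketch (hypothetical code): when substituting into the call
/// arguments of
/// \code
/// template<typename ...Ts> void wrap(Ts ...vals) { sink(vals...); }
/// \endcode
/// with Ts = {int, char}, the pack expansion 'vals...' is expanded into two
/// separate argument expressions.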
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo
*ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
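/// For illustration, a sketch of the source form handled here (the declared
/// name is a placeholder):
/// \code
///   #pragma GCC visibility push(hidden)
///   void Helper();   // given hidden visibility
///   #pragma GCC visibility pop
/// \endcode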
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
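/// For illustration (the function name is a placeholder):
/// \code
///   #pragma clang optimize off
///   void NotOptimized();   // effectively receives the 'optnone' treatment
///   #pragma clang optimize on
/// \endcode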
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
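/// For illustration, a sketch of the attribute spelling (the kernel name is
/// a placeholder):
/// \code
///   __attribute__((amdgpu_waves_per_eu(2, 4)))  // request 2 to 4 waves/EU
///   void Kern();
/// \endcode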
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if the declaration is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Tries to capture the lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs.
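/// For illustration, the kinds of uses meant here (variable names are
/// placeholders):
/// \code
///   #pragma omp parallel for private(Tmp) firstprivate(A) reduction(+ : Sum)
///   for (int I = 0; I < N; ++I) { /* ... */ }
/// \endcode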
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark the loop control variable used in \p Init for loop initialization as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on a correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the 'requires' directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct combiner. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct combiner.
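/// For illustration, in a user-defined reduction such as ('imax' being an
/// illustrative name):
/// \code
///   #pragma omp declare reduction(imax : int : omp_out = omp_in > omp_out ? omp_in : omp_out) initializer(omp_priv = INT_MIN)
/// \endcode
/// the combiner expression refers to omp_in and omp_out, while the
/// initializer clause refers to omp_priv.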
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of a target region, i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of a target region, i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement.
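/// For illustration, the shape of the handled construct ('N' is a
/// placeholder expression):
/// \code
///   #pragma omp parallel num_threads(4) if(N > 100)
///   { /* structured block: the associated statement */ }
/// \endcode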
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. 
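/// For illustration:
/// \code
///   #pragma omp target teams num_teams(8) thread_limit(64)
///   { /* structured block offloaded to the device */ }
/// \endcode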
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. 
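/// For illustration, 'collapse' folds a perfect loop nest into a single
/// iteration space ('Work', 'N', and 'M' are placeholders):
/// \code
///   #pragma omp for collapse(2)
///   for (int I = 0; I < N; ++I)
///     for (int J = 0; J < M; ++J)
///       Work(I, J);
/// \endcode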
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. 
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause.
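/// For illustration, 'linear' declares a variable that advances by a fixed
/// step each iteration (names are placeholders):
/// \code
///   #pragma omp simd linear(Idx : 2)
///   for (int I = 0; I < N; ++I) {
///     Out[Idx] = In[I];
///     Idx += 2;   // must advance by the declared step
///   }
/// \endcode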
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. 
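/// For illustration ('V', 'N', and 'DeviceAPI' are placeholders):
/// \code
///   #pragma omp target data map(to : V[0:N]) use_device_ptr(V)
///   { DeviceAPI(V); }   // inside the region, V is the device address
/// \endcode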
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function.
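// For illustration: passing an int through '...' (as in 'printf("%d", 42)')
// is fine, while passing an object with a non-trivial copy constructor or
// destructor is not; which of the buckets below applies is decided by
// isValidVarArgType.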
enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ, e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
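// A hypothetical sketch (not from the LLVM sources) of how the "Check"
// methods declared above are typically driven when type checking a builtin
// binary operator:
//
//   QualType ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, BO_Add);
//   if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
//     return ExprError(); // a diagnostic has already been emitted
//
// The null-QualType convention described above is what makes this pattern
// work: the caller needs only a single isNull() check after each operand
// check.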
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var.
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, 
SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
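// A hypothetical sketch (not from the LLVM sources) of the builtin-argument
// validation pattern used by the per-target Check*BuiltinFunctionCall methods
// above, which are commonly cascades of the SemaBuiltinConstantArg* helpers
// and return true on error:
//
//   bool Sema::CheckFooBuiltinFunctionCall(unsigned BuiltinID,
//                                          CallExpr *TheCall) {
//     // Require argument 1 to be an integer constant expression in [0, 15];
//     // emits a diagnostic and returns true on failure.
//     return SemaBuiltinConstantArgRange(TheCall, /*ArgNum=*/1, 0, 15);
//   }
//
// 'CheckFooBuiltinFunctionCall' is an invented name used only for
// illustration.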
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view it as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics.
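// A worked illustration (not from the LLVM sources) of the TooManyArguments
// logic above, for a function expecting two parameters:
//
//   TooManyArguments(/*NumParams=*/2, /*NumArgs=*/3)         --> true
//   TooManyArguments(/*NumParams=*/2, /*NumArgs=*/2)         --> false
//   TooManyArguments(/*NumParams=*/2, /*NumArgs=*/2, true)   --> true
//
// The last call returns true because, during code completion just after a
// comma, the partially typed argument counts as one extra argument.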
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and is converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
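The DenseMapInfo specialization above is the standard LLVM recipe for making a composite type usable as a DenseMap/DenseSet key: two reserved sentinel values (empty and tombstone), a hash, and an equality predicate. Below is a minimal standalone sketch of the same contract; the IntPair key and the demo around it are illustrative assumptions, not part of Sema.h.

// Sketch only: IntPair and its DenseMapInfo are hypothetical, not clang APIs.
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include <climits>

struct IntPair {
  int A;
  int B;
};

namespace llvm {
template <> struct DenseMapInfo<IntPair> {
  // The two sentinels are reserved; real keys must never take these values.
  static IntPair getEmptyKey() { return {INT_MIN, INT_MIN}; }
  static IntPair getTombstoneKey() { return {INT_MIN + 1, INT_MIN + 1}; }
  static unsigned getHashValue(const IntPair &P) {
    return hash_combine(P.A, P.B);
  }
  static bool isEqual(const IntPair &LHS, const IntPair &RHS) {
    return LHS.A == RHS.A && LHS.B == RHS.B;
  }
};
} // namespace llvm

// With the specialization visible, DenseMap accepts IntPair keys:
//   llvm::DenseMap<IntPair, const char *> M;
//   M[{1, 2}] = "entry";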
GB_full_add_template.c
//------------------------------------------------------------------------------
// GB_full_add_template: phase2 for C=A+B, C<M>=A+B, C<!M>=A+B, C is full
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C is full.  The mask M is not present (otherwise, C would be sparse,
// hypersparse, or bitmap).  All of these methods are asymptotically optimal.

//      ------------------------------------------
//      C       =           A       +       B
//      ------------------------------------------
//      full    .           sparse          full
//      full    .           bitmap          full
//      full    .           full            sparse
//      full    .           full            bitmap
//      full    .           full            full

// If C is iso and full, this phase has nothing to do.

#ifndef GB_ISO_ADD
{

    int64_t p ;

    ASSERT (M == NULL) ;
    ASSERT (A_is_full || B_is_full) ;
    ASSERT (C_sparsity == GxB_FULL) ;

    if (A_is_full && B_is_full)
    {

        //----------------------------------------------------------------------
        // Method30: C, A, B are all full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(C_nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            // C (i,j) = A (i,j) + B (i,j)
            GB_LOAD_A (aij, Ax, p, A_iso) ;
            GB_LOAD_B (bij, Bx, p, B_iso) ;
            GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
        }

    }
    else if (A_is_full)
    {

        //----------------------------------------------------------------------
        // C and A are full; B is hypersparse, sparse, or bitmap
        //----------------------------------------------------------------------

        if (B_is_bitmap)
        {

            //------------------------------------------------------------------
            // Method31: C and A are full; B is bitmap
            //------------------------------------------------------------------

            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                if (Bb [p])
                {
                    // C (i,j) = A (i,j) + B (i,j)
                    GB_LOAD_A (aij, Ax, p, A_iso) ;
                    GB_LOAD_B (bij, Bx, p, B_iso) ;
                    GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                }
                else
                {
                    #ifdef GB_EWISEUNION
                    {
                        // C (i,j) = A(i,j) + beta
                        GB_LOAD_A (aij, Ax, p, A_iso) ;
                        GB_BINOP (GB_CX (p), aij, beta_scalar,
                            p % vlen, p / vlen) ;
                    }
                    #else
                    {
                        // C (i,j) = A (i,j)
                        GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
                    }
                    #endif
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method32: C and A are full; B is sparse or hypersparse
            //------------------------------------------------------------------

            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                #ifdef GB_EWISEUNION
                {
                    // C (i,j) = A(i,j) + beta
                    GB_LOAD_A (aij, Ax, p, A_iso) ;
                    GB_BINOP (GB_CX (p), aij, beta_scalar,
                        p % vlen, p / vlen) ;
                }
                #else
                {
                    // C (i,j) = A (i,j)
                    GB_COPY_A_TO_C (GB_CX (p), Ax, p, A_iso) ;
                }
                #endif
            }

            GB_SLICE_MATRIX (B, 8, chunk) ;

            #pragma omp parallel for num_threads(B_nthreads) schedule(dynamic,1)
            for (taskid = 0 ; taskid < B_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Bslice [taskid] ;
                int64_t klast  = klast_Bslice  [taskid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of B(:,k) for this task
                    int64_t j = GBH (Bh, k) ;
                    int64_t pB_start, pB_end ;
                    GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
                        klast, pstart_Bslice, Bp, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over B(:,j), the kth vector of B
                    for (int64_t pB = pB_start ; pB < pB_end ; pB++)
                    {
                        // C (i,j) = A (i,j) + B (i,j)
                        int64_t i = Bi [pB] ;
                        int64_t p = pC_start + i ;
                        GB_LOAD_A
 (aij, Ax, p , A_iso) ;
                        GB_LOAD_B (bij, Bx, pB, B_iso) ;
                        GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                    }
                }
            }
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // C and B are full; A is hypersparse, sparse, or bitmap
        //----------------------------------------------------------------------

        if (A_is_bitmap)
        {

            //------------------------------------------------------------------
            // Method33: C and B are full; A is bitmap
            //------------------------------------------------------------------

            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                if (Ab [p])
                {
                    // C (i,j) = A (i,j) + B (i,j)
                    GB_LOAD_A (aij, Ax, p, A_iso) ;
                    GB_LOAD_B (bij, Bx, p, B_iso) ;
                    GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
                }
                else
                {
                    #ifdef GB_EWISEUNION
                    {
                        // C (i,j) = alpha + B(i,j)
                        GB_LOAD_B (bij, Bx, p, B_iso) ;
                        GB_BINOP (GB_CX (p), alpha_scalar, bij,
                            p % vlen, p / vlen) ;
                    }
                    #else
                    {
                        // C (i,j) = B (i,j)
                        GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
                    }
                    #endif
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // Method34: C and B are full; A is hypersparse or sparse
            //------------------------------------------------------------------

            #pragma omp parallel for num_threads(C_nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                #ifdef GB_EWISEUNION
                {
                    // C (i,j) = alpha + B(i,j)
                    GB_LOAD_B (bij, Bx, p, B_iso) ;
                    GB_BINOP (GB_CX (p), alpha_scalar, bij,
                        p % vlen, p / vlen) ;
                }
                #else
                {
                    // C (i,j) = B (i,j)
                    GB_COPY_B_TO_C (GB_CX (p), Bx, p, B_iso) ;
                }
                #endif
            }

            GB_SLICE_MATRIX (A, 8, chunk) ;

            #pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
            for (taskid = 0 ; taskid < A_ntasks ; taskid++)
            {
                int64_t kfirst = kfirst_Aslice [taskid] ;
                int64_t klast  = klast_Aslice  [taskid] ;
                for (int64_t k = kfirst ; k <= klast ; k++)
                {
                    // find the part of A(:,k) for this task
                    int64_t j = GBH (Ah, k) ;
                    int64_t pA_start, pA_end ;
                    GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
                        klast, pstart_Aslice, Ap, vlen) ;
                    int64_t pC_start = j * vlen ;
                    // traverse over A(:,j), the kth vector of A
                    for (int64_t pA = pA_start ; pA < pA_end ; pA++)
                    {
                        // C (i,j) = A (i,j) + B (i,j)
                        int64_t i = Ai [pA] ;
                        int64_t p = pC_start + i ;
                        GB_LOAD_A (aij, Ax, pA, A_iso) ;
                        GB_LOAD_B (bij, Bx, p , B_iso) ;
                        GB_BINOP (GB_CX (p), aij, bij, i, j) ;
                    }
                }
            }
        }
    }
}
#endif
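Method32 above shows the template's general strategy when C is full and one operand is sparse: a first dense pass seeds every C(i,j) from the full operand, then only B's stored entries are revisited and combined on top. Below is a standalone sketch of that two-pass pattern with plain arrays standing in for the GB_* macros; the function name and the hand-rolled CSC layout are illustrative assumptions, not GraphBLAS API.

#include <stdint.h>
#include <stdio.h>

/* Two-pass ewise add for C = A + B where C and A are full (column-major,
   column length vlen) and B is sparse in CSC form (Bp/Bi/Bx). */
static void full_add_dense_sparse (double *Cx, const double *Ax,
    int64_t vlen, int64_t nvec,
    const int64_t *Bp, const int64_t *Bi, const double *Bx)
{
    /* pass 1: C(i,j) = A(i,j) for every position of the full output */
    for (int64_t p = 0 ; p < vlen * nvec ; p++)
        Cx [p] = Ax [p] ;
    /* pass 2: scatter B, so C(i,j) = A(i,j) + B(i,j) at B's entries */
    for (int64_t j = 0 ; j < nvec ; j++)
    {
        int64_t pC_start = j * vlen ;
        for (int64_t pB = Bp [j] ; pB < Bp [j+1] ; pB++)
            Cx [pC_start + Bi [pB]] += Bx [pB] ;
    }
}

int main (void)
{
    /* 2-by-2 example: A is full; B holds a single entry B(1,0) = 5 */
    double Ax [4] = { 1, 2, 3, 4 }, Cx [4] ;
    int64_t Bp [3] = { 0, 1, 1 }, Bi [1] = { 1 } ;
    double Bx [1] = { 5 } ;
    full_add_dense_sparse (Cx, Ax, 2, 2, Bp, Bi, Bx) ;
    printf ("%g %g %g %g\n", Cx [0], Cx [1], Cx [2], Cx [3]) ; /* 1 7 3 4 */
    return (0) ;
}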
c-parser.c
/* Parser for C and Objective-C.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

   Parser actions based on the old Bison parser; structure somewhat
   influenced by and fragments based on the C++ parser.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO:

   Make sure all relevant comments, and all relevant code from all
   actions, are brought over from the old parser.  Verify exact
   correspondence of syntax accepted.

   Add testcases covering every input symbol in every state in old and
   new parsers.

   Include full syntax for GNU C, including erroneous cases accepted
   with error messages, in syntax productions in comments.

   Make more diagnostics in the front end generally take an explicit
   location rather than implicitly using input_location.  */

#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "attribs.h"
#include "stor-layout.h"
#include "varasm.h"
#include "trans-mem.h"
#include "c-family/c-pragma.h"
#include "c-lang.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "builtins.h"
#include "gomp-constants.h"
#include "c-family/c-indentation.h"
#include "gimple-expr.h"
#include "context.h"
#include "gcc-rich-location.h"
#include "c-parser.h"
#include "gimple-parser.h"
#include "read-rtl-function.h"
#include "run-rtl-passes.h"
#include "intl.h"
#include "c-family/name-hint.h"
#include "tree-iterator.h"

/* We need to walk over decls with incomplete struct/union/enum types
   after parsing the whole translation unit.  In finish_decl(), if the
   decl is static, has incomplete struct/union/enum type, it is
   appended to incomplete_record_decls.  In c_parser_translation_unit(),
   we iterate over incomplete_record_decls and report error if any of
   the decls are still incomplete.  */

vec<tree> incomplete_record_decls;

void
set_c_expr_source_range (c_expr *expr,
			 location_t start, location_t finish)
{
  expr->src_range.m_start = start;
  expr->src_range.m_finish = finish;
  if (expr->value)
    set_source_range (expr->value, start, finish);
}

void
set_c_expr_source_range (c_expr *expr, source_range src_range)
{
  expr->src_range = src_range;
  if (expr->value)
    set_source_range (expr->value, src_range);
}

/* Initialization routine for this file.  */

void
c_parse_init (void)
{
  /* The only initialization required is of the reserved word
     identifiers.  */
  unsigned int i;
  tree id;
  int mask = 0;

  /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the
     keyword in the c_token structure.
  */
  gcc_assert (RID_MAX <= 255);

  mask |= D_CXXONLY;
  if (!flag_isoc99)
    mask |= D_C99;
  if (flag_no_asm)
    {
      mask |= D_ASM | D_EXT;
      if (!flag_isoc99)
	mask |= D_EXT89;
    }
  if (!c_dialect_objc ())
    mask |= D_OBJC | D_CXX_OBJC;

  ridpointers = ggc_cleared_vec_alloc<tree> ((int) RID_MAX);
  for (i = 0; i < num_c_common_reswords; i++)
    {
      /* If a keyword is disabled, do not enter it into the table
	 and so create a canonical spelling that isn't a keyword.  */
      if (c_common_reswords[i].disable & mask)
	{
	  if (warn_cxx_compat
	      && (c_common_reswords[i].disable & D_CXXWARN))
	    {
	      id = get_identifier (c_common_reswords[i].word);
	      C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN);
	      C_IS_RESERVED_WORD (id) = 1;
	    }
	  continue;
	}

      id = get_identifier (c_common_reswords[i].word);
      C_SET_RID_CODE (id, c_common_reswords[i].rid);
      C_IS_RESERVED_WORD (id) = 1;
      ridpointers [(int) c_common_reswords[i].rid] = id;
    }

  for (i = 0; i < NUM_INT_N_ENTS; i++)
    {
      /* We always create the symbols but they aren't always
	 supported.  */
      char name[50];
      sprintf (name, "__int%d", int_n_data[i].bitsize);
      id = get_identifier (name);
      C_SET_RID_CODE (id, RID_FIRST_INT_N + i);
      C_IS_RESERVED_WORD (id) = 1;
    }
}

/* A parser structure recording information about the state and
   context of parsing.  Includes lexer information with up to two
   tokens of look-ahead; more are not needed for C.  */
struct GTY(()) c_parser
{
  /* The look-ahead tokens.  */
  c_token * GTY((skip)) tokens;
  /* Buffer for look-ahead tokens.  */
  c_token tokens_buf[4];
  /* How many look-ahead tokens are available (0 - 4, or
     more if parsing from pre-lexed tokens).  */
  unsigned int tokens_avail;
  /* True if a syntax error is being recovered from; false otherwise.
     c_parser_error sets this flag.  It should clear this flag when
     enough tokens have been consumed to recover from the error.  */
  BOOL_BITFIELD error : 1;
  /* True if we're processing a pragma, and shouldn't automatically
     consume CPP_PRAGMA_EOL.  */
  BOOL_BITFIELD in_pragma : 1;
  /* True if we're parsing the outermost block of an if statement.  */
  BOOL_BITFIELD in_if_block : 1;
  /* True if we want to lex an untranslated string.  */
  BOOL_BITFIELD lex_untranslated_string : 1;

  /* Objective-C specific parser/lexer information.  */

  /* True if we are in a context where the Objective-C "PQ" keywords
     are considered keywords.  */
  BOOL_BITFIELD objc_pq_context : 1;
  /* True if we are parsing a (potential) Objective-C foreach
     statement.  This is set to true after we parsed 'for (' and while
     we wait for 'in' or ';' to decide if it's a standard C for loop or
     an Objective-C foreach loop.  */
  BOOL_BITFIELD objc_could_be_foreach_context : 1;
  /* The following flag is needed to contextualize Objective-C lexical
     analysis.  In some cases (e.g., 'int NSObject;'), it is
     undesirable to bind an identifier to an Objective-C class, even
     if a class with that name exists.  */
  BOOL_BITFIELD objc_need_raw_identifier : 1;
  /* Nonzero if we're processing a __transaction statement.  The value
     is 1 | TM_STMT_ATTR_*.  */
  unsigned int in_transaction : 4;
  /* True if we are in a context where the Objective-C "Property attribute"
     keywords are valid.  */
  BOOL_BITFIELD objc_property_attr_context : 1;

  /* Location of the last consumed token.  */
  location_t last_token_location;
};

/* Return a pointer to the Nth token in PARSER's tokens_buf.  */

c_token *
c_parser_tokens_buf (c_parser *parser, unsigned n)
{
  return &parser->tokens_buf[n];
}

/* Return the error state of PARSER.  */

bool
c_parser_error (c_parser *parser)
{
  return parser->error;
}

/* Set the error state of PARSER to ERR.
*/ void c_parser_set_error (c_parser *parser, bool err) { parser->error = err; } /* The actual parser and external interface. ??? Does this need to be garbage-collected? */ static GTY (()) c_parser *the_parser; /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token (c_parser *parser, c_token *token) { timevar_push (TV_LEX); token->type = c_lex_with_flags (&token->value, &token->location, &token->flags, (parser->lex_untranslated_string ? C_LEX_STRING_NO_TRANSLATE : 0)); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; switch (token->type) { case CPP_NAME: { tree decl; bool objc_force_identifier = parser->objc_need_raw_identifier; if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; if (C_IS_RESERVED_WORD (token->value)) { enum rid rid_code = C_RID_CODE (token->value); if (rid_code == RID_CXX_COMPAT_WARN) { warning_at (token->location, OPT_Wc___compat, "identifier %qE conflicts with C++ keyword", token->value); } else if (rid_code >= RID_FIRST_ADDR_SPACE && rid_code <= RID_LAST_ADDR_SPACE) { addr_space_t as; as = (addr_space_t) (rid_code - RID_FIRST_ADDR_SPACE); targetm.addr_space.diagnose_usage (as, token->location); token->id_kind = C_ID_ADDRSPACE; token->keyword = rid_code; break; } else if (c_dialect_objc () && OBJC_IS_PQ_KEYWORD (rid_code)) { /* We found an Objective-C "pq" keyword (in, out, inout, bycopy, byref, oneway). They need special care because the interpretation depends on the context. */ if (parser->objc_pq_context) { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } else if (parser->objc_could_be_foreach_context && rid_code == RID_IN) { /* We are in Objective-C, inside a (potential) foreach context (which means after having parsed 'for (', but before having parsed ';'), and we found 'in'. We consider it the keyword which terminates the declaration at the beginning of a foreach-statement. Note that this means you can't use 'in' for anything else in that context; in particular, in Objective-C you can't use 'in' as the name of the running variable in a C for loop. We could potentially try to add code here to disambiguate, but it seems a reasonable limitation. */ token->type = CPP_KEYWORD; token->keyword = rid_code; break; } /* Else, "pq" keywords outside of the "pq" context are not keywords, and we fall through to the code for normal tokens. */ } else if (c_dialect_objc () && OBJC_IS_PATTR_KEYWORD (rid_code)) { /* We found an Objective-C "property attribute" keyword (getter, setter, readonly, etc). These are only valid in the property context. */ if (parser->objc_property_attr_context) { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } /* Else they are not special keywords. */ } else if (c_dialect_objc () && (OBJC_IS_AT_KEYWORD (rid_code) || OBJC_IS_CXX_KEYWORD (rid_code))) { /* We found one of the Objective-C "@" keywords (defs, selector, synchronized, etc) or one of the Objective-C "cxx" keywords (class, private, protected, public, try, catch, throw) without a preceding '@' sign. Do nothing and fall through to the code for normal tokens (in C++ we would still consider the CXX ones keywords, but not in C). 
*/ ; } else { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name (token->value); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. */ if (objc_interface_decl && (!objc_force_identifier || global_bindings_p ())) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; switch (C_RID_CODE (token->value)) { /* Replace 'class' with '@class', 'private' with '@private', etc. This prevents confusion with the C++ keyword 'class', and makes the tokens consistent with other Objective-C 'AT' keywords. For example '@class' is reported as RID_AT_CLASS which is consistent with '@synchronized', which is reported as RID_AT_SYNCHRONIZED. */ case RID_CLASS: token->keyword = RID_AT_CLASS; break; case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break; case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break; case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break; case RID_THROW: token->keyword = RID_AT_THROW; break; case RID_TRY: token->keyword = RID_AT_TRY; break; case RID_CATCH: token->keyword = RID_AT_CATCH; break; case RID_SYNCHRONIZED: token->keyword = RID_AT_SYNCHRONIZED; break; default: token->keyword = C_RID_CODE (token->value); } break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* These tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); token->value = NULL; break; default: break; } timevar_pop (TV_LEX); } /* Return a pointer to the next token from PARSER, reading it in if necessary. */ c_token * c_parser_peek_token (c_parser *parser) { if (parser->tokens_avail == 0) { c_lex_one_token (parser, &parser->tokens[0]); parser->tokens_avail = 1; } return &parser->tokens[0]; } /* Return a pointer to the next-but-one token from PARSER, reading it in if necessary. The next token is already read in. */ c_token * c_parser_peek_2nd_token (c_parser *parser) { if (parser->tokens_avail >= 2) return &parser->tokens[1]; gcc_assert (parser->tokens_avail == 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL); c_lex_one_token (parser, &parser->tokens[1]); parser->tokens_avail = 2; return &parser->tokens[1]; } /* Return a pointer to the Nth token from PARSER, reading it in if necessary. The N-1th token is already read in. */ c_token * c_parser_peek_nth_token (c_parser *parser, unsigned int n) { /* N is 1-based, not zero-based. 
*/ gcc_assert (n > 0); if (parser->tokens_avail >= n) return &parser->tokens[n - 1]; gcc_assert (parser->tokens_avail == n - 1); c_lex_one_token (parser, &parser->tokens[n - 1]); parser->tokens_avail = n; return &parser->tokens[n - 1]; } bool c_keyword_starts_typename (enum rid keyword) { switch (keyword) { case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: CASE_RID_FLOATN_NX: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_ATOMIC: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_AUTO_TYPE: case RID_ALIGNAS: return true; default: if (keyword >= RID_FIRST_INT_N && keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS && int_n_enabled_p[keyword - RID_FIRST_INT_N]) return true; return false; } } /* Return true if TOKEN can start a type name, false otherwise. */ bool c_token_starts_typename (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: return c_keyword_starts_typename (token->keyword); case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if the next token from PARSER can start a type name, false otherwise. LA specifies how to do lookahead in order to detect unknown type names. If unsure, pick CLA_PREFER_ID. */ static inline bool c_parser_next_tokens_start_typename (c_parser *parser, enum c_lookahead_kind la) { c_token *token = c_parser_peek_token (parser); if (c_token_starts_typename (token)) return true; /* Try a bit harder to detect an unknown typename. */ if (la != cla_prefer_id && token->type == CPP_NAME && token->id_kind == C_ID_ID /* Do not try too hard when we could have "object in array". */ && !parser->objc_could_be_foreach_context && (la == cla_prefer_type || c_parser_peek_2nd_token (parser)->type == CPP_NAME || c_parser_peek_2nd_token (parser)->type == CPP_MULT) /* Only unknown identifiers. */ && !lookup_name (token->value)) return true; return false; } /* Return true if TOKEN is a type qualifier, false otherwise. */ static bool c_token_is_qualifier (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ADDRSPACE: return true; default: return false; } case CPP_KEYWORD: switch (token->keyword) { case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_ATOMIC: return true; default: return false; } case CPP_LESS: return false; default: gcc_unreachable (); } } /* Return true if the next token from PARSER is a type qualifier, false otherwise. */ static inline bool c_parser_next_token_is_qualifier (c_parser *parser) { c_token *token = c_parser_peek_token (parser); return c_token_is_qualifier (token); } /* Return true if TOKEN can start declaration specifiers, false otherwise. 
*/ static bool c_token_starts_declspecs (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_NORETURN: case RID_AUTO: case RID_THREAD: case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: CASE_RID_FLOATN_NX: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_ALIGNAS: case RID_ATOMIC: case RID_AUTO_TYPE: return true; default: if (token->keyword >= RID_FIRST_INT_N && token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS && int_n_enabled_p[token->keyword - RID_FIRST_INT_N]) return true; return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if TOKEN can start declaration specifiers or a static assertion, false otherwise. */ static bool c_token_starts_declaration (c_token *token) { if (c_token_starts_declspecs (token) || token->keyword == RID_STATIC_ASSERT) return true; else return false; } /* Return true if the next token from PARSER can start declaration specifiers, false otherwise. */ bool c_parser_next_token_starts_declspecs (c_parser *parser) { c_token *token = c_parser_peek_token (parser); /* In Objective-C, a classname normally starts a declspecs unless it is immediately followed by a dot. In that case, it is the Objective-C 2.0 "dot-syntax" for class objects, ie, calls the setter/getter on the class. c_token_starts_declspecs() can't differentiate between the two cases because it only checks the current token, so we have a special check here. */ if (c_dialect_objc () && token->type == CPP_NAME && token->id_kind == C_ID_CLASSNAME && c_parser_peek_2nd_token (parser)->type == CPP_DOT) return false; return c_token_starts_declspecs (token); } /* Return true if the next tokens from PARSER can start declaration specifiers or a static assertion, false otherwise. */ bool c_parser_next_tokens_start_declaration (c_parser *parser) { c_token *token = c_parser_peek_token (parser); /* Same as above. */ if (c_dialect_objc () && token->type == CPP_NAME && token->id_kind == C_ID_CLASSNAME && c_parser_peek_2nd_token (parser)->type == CPP_DOT) return false; /* Labels do not start declarations. */ if (token->type == CPP_NAME && c_parser_peek_2nd_token (parser)->type == CPP_COLON) return false; if (c_token_starts_declaration (token)) return true; if (c_parser_next_tokens_start_typename (parser, cla_nonabstract_decl)) return true; return false; } /* Consume the next token from PARSER. 
*/ void c_parser_consume_token (c_parser *parser) { gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL); gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA); parser->last_token_location = parser->tokens[0].location; if (parser->tokens != &parser->tokens_buf[0]) parser->tokens++; else if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; } /* Expect the current token to be a #pragma. Consume it and remember that we've begun parsing a pragma. */ static void c_parser_consume_pragma (c_parser *parser) { gcc_assert (!parser->in_pragma); gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type == CPP_PRAGMA); if (parser->tokens != &parser->tokens_buf[0]) parser->tokens++; else if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; parser->in_pragma = true; } /* Update the global input_location from TOKEN. */ static inline void c_parser_set_source_position_from_token (c_token *token) { if (token->type != CPP_EOF) { input_location = token->location; } } /* Helper function for c_parser_error. Having peeked a token of kind TOK1_KIND that might signify a conflict marker, peek successor tokens to determine if we actually do have a conflict marker. Specifically, we consider a run of 7 '<', '=' or '>' characters at the start of a line as a conflict marker. These come through the lexer as three pairs and a single, e.g. three CPP_LSHIFT ("<<") and a CPP_LESS ('<'). If it returns true, *OUT_LOC is written to with the location/range of the marker. */ static bool c_parser_peek_conflict_marker (c_parser *parser, enum cpp_ttype tok1_kind, location_t *out_loc) { c_token *token2 = c_parser_peek_2nd_token (parser); if (token2->type != tok1_kind) return false; c_token *token3 = c_parser_peek_nth_token (parser, 3); if (token3->type != tok1_kind) return false; c_token *token4 = c_parser_peek_nth_token (parser, 4); if (token4->type != conflict_marker_get_final_tok_kind (tok1_kind)) return false; /* It must be at the start of the line. */ location_t start_loc = c_parser_peek_token (parser)->location; if (LOCATION_COLUMN (start_loc) != 1) return false; /* We have a conflict marker. Construct a location of the form: <<<<<<< ^~~~~~~ with start == caret, finishing at the end of the marker. */ location_t finish_loc = get_finish (token4->location); *out_loc = make_location (start_loc, start_loc, finish_loc); return true; } /* Issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where TOKEN is the next token in the input stream of PARSER. MESSAGE (specified by the caller) is usually of the form "expected OTHER-TOKEN". Use RICHLOC as the location of the diagnostic. Do not issue a diagnostic if still recovering from an error. Return true iff an error was actually emitted. ??? This is taken from the C++ parser, but building up messages in this way is not i18n-friendly and some other approach should be used. */ static bool c_parser_error_richloc (c_parser *parser, const char *gmsgid, rich_location *richloc) { c_token *token = c_parser_peek_token (parser); if (parser->error) return false; parser->error = true; if (!gmsgid) return false; /* If this is actually a conflict marker, report it as such. 
  */
  if (token->type == CPP_LSHIFT
      || token->type == CPP_RSHIFT
      || token->type == CPP_EQ_EQ)
    {
      location_t loc;
      if (c_parser_peek_conflict_marker (parser, token->type, &loc))
	{
	  error_at (loc, "version control conflict marker in file");
	  return true;
	}
    }

  c_parse_error (gmsgid,
		 /* Because c_parse_error does not understand
		    CPP_KEYWORD, keywords are treated like
		    identifiers.  */
		 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		 /* ??? The C parser does not save the cpp flags of a
		    token, we need to pass 0 here and we will not get
		    the source spelling of some tokens but rather the
		    canonical spelling.  */
		 token->value, /*flags=*/0, richloc);
  return true;
}

/* As c_parser_error_richloc, but issue the message at the location of
   PARSER's next token, or at input_location if the next token is
   EOF.  */

bool
c_parser_error (c_parser *parser, const char *gmsgid)
{
  c_token *token = c_parser_peek_token (parser);
  c_parser_set_source_position_from_token (token);
  rich_location richloc (line_table, input_location);
  return c_parser_error_richloc (parser, gmsgid, &richloc);
}

/* Some tokens naturally come in pairs, e.g. '(' and ')'.
   This class is for tracking such a matching pair of symbols.
   In particular, it tracks the location of the first token,
   so that if the second token is missing, we can highlight the
   location of the first token when notifying the user about the
   problem.  */

template <typename traits_t>
class token_pair
{
 public:
  /* token_pair's ctor.  */
  token_pair () : m_open_loc (UNKNOWN_LOCATION) {}

  /* If the next token is the opening symbol for this pair, consume it and
     return true.
     Otherwise, issue an error and return false.
     In either case, record the location of the opening token.  */

  bool require_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    if (token)
      m_open_loc = token->location;

    return c_parser_require (parser, traits_t::open_token_type,
			     traits_t::open_gmsgid);
  }

  /* Consume the next token from PARSER, recording its location as
     that of the opening token within the pair.  */

  void consume_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    gcc_assert (token->type == traits_t::open_token_type);
    m_open_loc = token->location;
    c_parser_consume_token (parser);
  }

  /* If the next token is the closing symbol for this pair, consume it
     and return true.
     Otherwise, issue an error, highlighting the location of the
     corresponding opening token, and return false.  */

  bool require_close (c_parser *parser) const
  {
    return c_parser_require (parser, traits_t::close_token_type,
			     traits_t::close_gmsgid, m_open_loc);
  }

  /* Like token_pair::require_close, except that tokens will be skipped
     until the desired token is found.  An error message is still produced
     if the next token is not as expected.  */

  void skip_until_found_close (c_parser *parser) const
  {
    c_parser_skip_until_found (parser, traits_t::close_token_type,
			       traits_t::close_gmsgid, m_open_loc);
  }

 private:
  location_t m_open_loc;
};

/* Traits for token_pair<T> for tracking matching pairs of parentheses.  */

struct matching_paren_traits
{
  static const enum cpp_ttype open_token_type = CPP_OPEN_PAREN;
  static const char * const open_gmsgid;
  static const enum cpp_ttype close_token_type = CPP_CLOSE_PAREN;
  static const char * const close_gmsgid;
};

const char * const matching_paren_traits::open_gmsgid = "expected %<(%>";
const char * const matching_paren_traits::close_gmsgid = "expected %<)%>";

/* "matching_parens" is a token_pair<T> class for tracking matching
   pairs of parentheses.
*/ typedef token_pair<matching_paren_traits> matching_parens; /* Traits for token_pair<T> for tracking matching pairs of braces. */ struct matching_brace_traits { static const enum cpp_ttype open_token_type = CPP_OPEN_BRACE; static const char * const open_gmsgid; static const enum cpp_ttype close_token_type = CPP_CLOSE_BRACE; static const char * const close_gmsgid; }; const char * const matching_brace_traits::open_gmsgid = "expected %<{%>"; const char * const matching_brace_traits::close_gmsgid = "expected %<}%>"; /* "matching_braces" is a token_pair<T> class for tracking matching pairs of braces. */ typedef token_pair<matching_brace_traits> matching_braces; /* Get a description of the matching symbol to TYPE e.g. "(" for CPP_CLOSE_PAREN. */ static const char * get_matching_symbol (enum cpp_ttype type) { switch (type) { default: gcc_unreachable (); return ""; case CPP_CLOSE_PAREN: return "("; case CPP_CLOSE_BRACE: return "{"; } } /* If the next token is of the indicated TYPE, consume it. Otherwise, issue the error MSGID. If MSGID is NULL then a message has already been produced and no message will be produced this time. Returns true if found, false otherwise. If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it within any error as the location of an "opening" token matching the close token TYPE (e.g. the location of the '(' when TYPE is CPP_CLOSE_PAREN). If TYPE_IS_UNIQUE is true (the default) then msgid describes exactly one type (e.g. "expected %<)%>") and thus it may be reasonable to attempt to generate a fix-it hint for the problem. Otherwise msgid describes multiple token types (e.g. "expected %<;%>, %<,%> or %<)%>"), and thus we shouldn't attempt to generate a fix-it hint. */ bool c_parser_require (c_parser *parser, enum cpp_ttype type, const char *msgid, location_t matching_location, bool type_is_unique) { if (c_parser_next_token_is (parser, type)) { c_parser_consume_token (parser); return true; } else { location_t next_token_loc = c_parser_peek_token (parser)->location; gcc_rich_location richloc (next_token_loc); /* Potentially supply a fix-it hint, suggesting to add the missing token immediately after the *previous* token. This may move the primary location within richloc. */ if (!parser->error && type_is_unique) maybe_suggest_missing_token_insertion (&richloc, type, parser->last_token_location); /* If matching_location != UNKNOWN_LOCATION, highlight it. Attempt to consolidate diagnostics by printing it as a secondary range within the main diagnostic. */ bool added_matching_location = false; if (matching_location != UNKNOWN_LOCATION) added_matching_location = richloc.add_location_if_nearby (matching_location); if (c_parser_error_richloc (parser, msgid, &richloc)) /* If we weren't able to consolidate matching_location, then print it as a secondary diagnostic. */ if (matching_location != UNKNOWN_LOCATION && !added_matching_location) inform (matching_location, "to match this %qs", get_matching_symbol (type)); return false; } } /* If the next token is the indicated keyword, consume it. Otherwise, issue the error MSGID. Returns true if found, false otherwise. */ static bool c_parser_require_keyword (c_parser *parser, enum rid keyword, const char *msgid) { if (c_parser_next_token_is_keyword (parser, keyword)) { c_parser_consume_token (parser); return true; } else { c_parser_error (parser, msgid); return false; } } /* Like c_parser_require, except that tokens will be skipped until the desired token is found. 
An error message is still produced if the next token is not as expected. If MSGID is NULL then a message has already been produced and no message will be produced this time. If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it within any error as the location of an "opening" token matching the close token TYPE (e.g. the location of the '(' when TYPE is CPP_CLOSE_PAREN). */ void c_parser_skip_until_found (c_parser *parser, enum cpp_ttype type, const char *msgid, location_t matching_location) { unsigned nesting_depth = 0; if (c_parser_require (parser, type, msgid, matching_location)) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ c_token *token = c_parser_peek_token (parser); /* If we've reached the token we want, consume it and stop. */ if (token->type == type && !nesting_depth) { c_parser_consume_token (parser); break; } /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Skip tokens until the end of a parameter is found, but do not consume the comma, semicolon or closing delimiter. */ static void c_parser_skip_to_end_of_parameter (c_parser *parser) { unsigned nesting_depth = 0; while (true) { c_token *token = c_parser_peek_token (parser); if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON) && !nesting_depth) break; /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Expect to be at the end of the pragma directive and consume an end of line marker. */ static void c_parser_skip_to_pragma_eol (c_parser *parser, bool error_if_not_eol = true) { gcc_assert (parser->in_pragma); parser->in_pragma = false; if (error_if_not_eol && c_parser_peek_token (parser)->type != CPP_PRAGMA_EOL) c_parser_error (parser, "expected end of line"); cpp_ttype token_type; do { c_token *token = c_parser_peek_token (parser); token_type = token->type; if (token_type == CPP_EOF) break; c_parser_consume_token (parser); } while (token_type != CPP_PRAGMA_EOL); parser->error = false; } /* Skip tokens until we have consumed an entire block, or until we have consumed a non-nested ';'. */ static void c_parser_skip_to_end_of_block_or_statement (c_parser *parser) { unsigned nesting_depth = 0; bool save_error = parser->error; while (true) { c_token *token; /* Peek at the next token. */ token = c_parser_peek_token (parser); switch (token->type) { case CPP_EOF: return; case CPP_PRAGMA_EOL: if (parser->in_pragma) return; break; case CPP_SEMICOLON: /* If the next token is a ';', we have reached the end of the statement. */ if (!nesting_depth) { /* Consume the ';'. 
  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have reached
	     the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_OPEN_BRACE:
	  /* If the next token is a '{', then we are entering a new block.
	     Consume the entire block.  */
	  ++nesting_depth;
	  break;

	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;

	default:
	  break;
	}
      c_parser_consume_token (parser);
    }

 finished:
  parser->error = false;
}

/* CPP's options (initialized by c-opts.c).  */
extern cpp_options *cpp_opts;

/* Save the warning flags which are controlled by __extension__.  */

static inline int
disable_extension_diagnostics (void)
{
  int ret = (pedantic
	     | (warn_pointer_arith << 1)
	     | (warn_traditional << 2)
	     | (flag_iso << 3)
	     | (warn_long_long << 4)
	     | (warn_cxx_compat << 5)
	     | (warn_overlength_strings << 6)
	     /* warn_c90_c99_compat has three states: -1/0/1, so we must
		play tricks to properly restore it.  */
	     | ((warn_c90_c99_compat == 1) << 7)
	     | ((warn_c90_c99_compat == -1) << 8)
	     /* Similarly for warn_c99_c11_compat.  */
	     | ((warn_c99_c11_compat == 1) << 9)
	     | ((warn_c99_c11_compat == -1) << 10)
	     );
  cpp_opts->cpp_pedantic = pedantic = 0;
  warn_pointer_arith = 0;
  cpp_opts->cpp_warn_traditional = warn_traditional = 0;
  flag_iso = 0;
  cpp_opts->cpp_warn_long_long = warn_long_long = 0;
  warn_cxx_compat = 0;
  warn_overlength_strings = 0;
  warn_c90_c99_compat = 0;
  warn_c99_c11_compat = 0;
  return ret;
}

/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics.  */

static inline void
restore_extension_diagnostics (int flags)
{
  cpp_opts->cpp_pedantic = pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  cpp_opts->cpp_warn_traditional = warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
  cpp_opts->cpp_warn_long_long = warn_long_long = (flags >> 4) & 1;
  warn_cxx_compat = (flags >> 5) & 1;
  warn_overlength_strings = (flags >> 6) & 1;
  /* See above for why this is needed.  */
  warn_c90_c99_compat = (flags >> 7) & 1 ? 1 : ((flags >> 8) & 1 ? -1 : 0);
  warn_c99_c11_compat = (flags >> 9) & 1 ? 1 : ((flags >> 10) & 1 ? -1 : 0);
}

/* Helper data structure for parsing #pragma acc routine.  */
struct oacc_routine_data {
  bool error_seen; /* Set if error has been reported.  */
  bool fndecl_seen; /* Set if one fn decl/definition has been seen already.
*/ tree clauses; location_t loc; }; static void c_parser_external_declaration (c_parser *); static void c_parser_asm_definition (c_parser *); static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool, bool, tree *, vec<c_token>, struct oacc_routine_data * = NULL, bool * = NULL); static void c_parser_static_assert_declaration_no_semi (c_parser *); static void c_parser_static_assert_declaration (c_parser *); static struct c_typespec c_parser_enum_specifier (c_parser *); static struct c_typespec c_parser_struct_or_union_specifier (c_parser *); static tree c_parser_struct_declaration (c_parser *); static struct c_typespec c_parser_typeof_specifier (c_parser *); static tree c_parser_alignas_specifier (c_parser *); static struct c_declarator *c_parser_direct_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator_inner (c_parser *, bool, struct c_declarator *); static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree); static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree, tree); static struct c_parm *c_parser_parameter_declaration (c_parser *, tree); static tree c_parser_simple_asm_expr (c_parser *); static tree c_parser_attributes (c_parser *); static struct c_expr c_parser_initializer (c_parser *); static struct c_expr c_parser_braced_init (c_parser *, tree, bool, struct obstack *); static void c_parser_initelt (c_parser *, struct obstack *); static void c_parser_initval (c_parser *, struct c_expr *, struct obstack *); static tree c_parser_compound_statement (c_parser *); static void c_parser_compound_statement_nostart (c_parser *); static void c_parser_label (c_parser *); static void c_parser_statement (c_parser *, bool *, location_t * = NULL); static void c_parser_statement_after_labels (c_parser *, bool *, vec<tree> * = NULL); static tree c_parser_c99_block_statement (c_parser *, bool *, location_t * = NULL); static void c_parser_if_statement (c_parser *, bool *, vec<tree> *); static void c_parser_switch_statement (c_parser *, bool *); static void c_parser_while_statement (c_parser *, bool, unsigned short, bool *); static void c_parser_do_statement (c_parser *, bool, unsigned short); static void c_parser_for_statement (c_parser *, bool, unsigned short, bool *); static tree c_parser_asm_statement (c_parser *); static tree c_parser_asm_operands (c_parser *); static tree c_parser_asm_goto_operands (c_parser *); static tree c_parser_asm_clobbers (c_parser *); static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *, tree = NULL_TREE); static struct c_expr c_parser_conditional_expression (c_parser *, struct c_expr *, tree); static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *, tree); static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_unary_expression (c_parser *); static struct c_expr c_parser_sizeof_expression (c_parser *); static struct c_expr c_parser_alignof_expression (c_parser *); static struct c_expr c_parser_postfix_expression (c_parser *); static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *, struct c_type_name *, location_t); static struct c_expr c_parser_postfix_expression_after_primary (c_parser *, location_t loc, struct c_expr); static tree c_parser_transaction (c_parser *, enum rid); static struct c_expr c_parser_transaction_expression (c_parser *, enum rid); static tree c_parser_transaction_cancel (c_parser *); static struct c_expr c_parser_expression 
(c_parser *); static struct c_expr c_parser_expression_conv (c_parser *); static vec<tree, va_gc> *c_parser_expr_list (c_parser *, bool, bool, vec<tree, va_gc> **, location_t *, tree *, vec<location_t> *, unsigned int * = NULL); static void c_parser_oacc_declare (c_parser *); static void c_parser_oacc_enter_exit_data (c_parser *, bool); static void c_parser_oacc_update (c_parser *); static void c_parser_omp_construct (c_parser *, bool *); static void c_parser_omp_threadprivate (c_parser *); static void c_parser_omp_barrier (c_parser *); static void c_parser_omp_flush (c_parser *); static tree c_parser_omp_for_loop (location_t, c_parser *, enum tree_code, tree, tree *, bool *); static void c_parser_omp_taskwait (c_parser *); static void c_parser_omp_taskyield (c_parser *); static void c_parser_omp_cancel (c_parser *); enum pragma_context { pragma_external, pragma_struct, pragma_param, pragma_stmt, pragma_compound }; static bool c_parser_pragma (c_parser *, enum pragma_context, bool *); static void c_parser_omp_cancellation_point (c_parser *, enum pragma_context); static bool c_parser_omp_target (c_parser *, enum pragma_context, bool *); static void c_parser_omp_end_declare_target (c_parser *); static void c_parser_omp_declare (c_parser *, enum pragma_context); static bool c_parser_omp_ordered (c_parser *, enum pragma_context, bool *); static void c_parser_oacc_routine (c_parser *, enum pragma_context); /* These Objective-C parser functions are only ever called when compiling Objective-C. */ static void c_parser_objc_class_definition (c_parser *, tree); static void c_parser_objc_class_instance_variables (c_parser *); static void c_parser_objc_class_declaration (c_parser *); static void c_parser_objc_alias_declaration (c_parser *); static void c_parser_objc_protocol_definition (c_parser *, tree); static bool c_parser_objc_method_type (c_parser *); static void c_parser_objc_method_definition (c_parser *); static void c_parser_objc_methodprotolist (c_parser *); static void c_parser_objc_methodproto (c_parser *); static tree c_parser_objc_method_decl (c_parser *, bool, tree *, tree *); static tree c_parser_objc_type_name (c_parser *); static tree c_parser_objc_protocol_refs (c_parser *); static void c_parser_objc_try_catch_finally_statement (c_parser *); static void c_parser_objc_synchronized_statement (c_parser *); static tree c_parser_objc_selector (c_parser *); static tree c_parser_objc_selector_arg (c_parser *); static tree c_parser_objc_receiver (c_parser *); static tree c_parser_objc_message_args (c_parser *); static tree c_parser_objc_keywordexpr (c_parser *); static void c_parser_objc_at_property_declaration (c_parser *); static void c_parser_objc_at_synthesize_declaration (c_parser *); static void c_parser_objc_at_dynamic_declaration (c_parser *); static bool c_parser_objc_diagnose_bad_element_prefix (c_parser *, struct c_declspecs *); static void c_parser_parse_rtl_body (c_parser *parser, char *start_with_pass); /* Parse a translation unit (C90 6.7, C99 6.9, C11 6.9). 
translation-unit: external-declarations external-declarations: external-declaration external-declarations external-declaration GNU extensions: translation-unit: empty */ static void c_parser_translation_unit (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_EOF)) { pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "ISO C forbids an empty translation unit"); } else { void *obstack_position = obstack_alloc (&parser_obstack, 0); mark_valid_location_for_stdc_pragma (false); do { ggc_collect (); c_parser_external_declaration (parser); obstack_free (&parser_obstack, obstack_position); } while (c_parser_next_token_is_not (parser, CPP_EOF)); } unsigned int i; tree decl; FOR_EACH_VEC_ELT (incomplete_record_decls, i, decl) if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node) error ("storage size of %q+D isn%'t known", decl); } /* Parse an external declaration (C90 6.7, C99 6.9, C11 6.9). external-declaration: function-definition declaration GNU extensions: external-declaration: asm-definition ; __extension__ external-declaration Objective-C: external-declaration: objc-class-definition objc-class-declaration objc-alias-declaration objc-protocol-definition objc-method-definition @end */ static void c_parser_external_declaration (c_parser *parser) { int ext; switch (c_parser_peek_token (parser)->type) { case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_EXTENSION: ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_external_declaration (parser); restore_extension_diagnostics (ext); break; case RID_ASM: c_parser_asm_definition (parser); break; case RID_AT_INTERFACE: case RID_AT_IMPLEMENTATION: gcc_assert (c_dialect_objc ()); c_parser_objc_class_definition (parser, NULL_TREE); break; case RID_AT_CLASS: gcc_assert (c_dialect_objc ()); c_parser_objc_class_declaration (parser); break; case RID_AT_ALIAS: gcc_assert (c_dialect_objc ()); c_parser_objc_alias_declaration (parser); break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_objc_protocol_definition (parser, NULL_TREE); break; case RID_AT_PROPERTY: gcc_assert (c_dialect_objc ()); c_parser_objc_at_property_declaration (parser); break; case RID_AT_SYNTHESIZE: gcc_assert (c_dialect_objc ()); c_parser_objc_at_synthesize_declaration (parser); break; case RID_AT_DYNAMIC: gcc_assert (c_dialect_objc ()); c_parser_objc_at_dynamic_declaration (parser); break; case RID_AT_END: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); objc_finish_implementation (); break; default: goto decl_or_fndef; } break; case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PRAGMA: mark_valid_location_for_stdc_pragma (true); c_parser_pragma (parser, pragma_external, NULL); mark_valid_location_for_stdc_pragma (false); break; case CPP_PLUS: case CPP_MINUS: if (c_dialect_objc ()) { c_parser_objc_method_definition (parser); break; } /* Else fall through, and yield a syntax error trying to parse as a declaration or function definition. */ /* FALLTHRU */ default: decl_or_fndef: /* A declaration or a function definition (or, in Objective-C, an @interface or @protocol with prefix attributes). We can only tell which after parsing the declaration specifiers, if any, and the first declarator. 
*/ c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL, vNULL); break; } } static void c_finish_omp_declare_simd (c_parser *, tree, tree, vec<c_token>); static void c_finish_oacc_routine (struct oacc_routine_data *, tree, bool); /* Build and add a DEBUG_BEGIN_STMT statement with location LOC. */ static void add_debug_begin_stmt (location_t loc) { /* Don't add DEBUG_BEGIN_STMTs outside of functions, see PR84721. */ if (!MAY_HAVE_DEBUG_MARKER_STMTS || !building_stmt_list_p ()) return; tree stmt = build0 (DEBUG_BEGIN_STMT, void_type_node); SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); } /* Parse a declaration or function definition (C90 6.5, 6.7.1, C99 6.7, 6.9.1, C11 6.7, 6.9.1). If FNDEF_OK is true, a function definition is accepted; otherwise (old-style parameter declarations) only other declarations are accepted. If STATIC_ASSERT_OK is true, a static assertion is accepted; otherwise (old-style parameter declarations) it is not. If NESTED is true, we are inside a function or parsing old-style parameter declarations; any functions encountered are nested functions and declaration specifiers are required; otherwise we are at top level and functions are normal functions and declaration specifiers may be optional. If EMPTY_OK is true, empty declarations are OK (subject to all other constraints); otherwise (old-style parameter declarations) they are diagnosed. If START_ATTR_OK is true, the declaration specifiers may start with attributes; otherwise they may not. OBJC_FOREACH_OBJECT_DECLARATION can be used to get back the parsed declaration when parsing an Objective-C foreach statement. FALLTHRU_ATTR_P is used to signal whether this function parsed "__attribute__((fallthrough));". declaration: declaration-specifiers init-declarator-list[opt] ; static_assert-declaration function-definition: declaration-specifiers[opt] declarator declaration-list[opt] compound-statement declaration-list: declaration declaration-list declaration init-declarator-list: init-declarator init-declarator-list , init-declarator init-declarator: declarator simple-asm-expr[opt] attributes[opt] declarator simple-asm-expr[opt] attributes[opt] = initializer GNU extensions: nested-function-definition: declaration-specifiers declarator declaration-list[opt] compound-statement attribute ; Objective-C: attributes objc-class-definition attributes objc-category-definition attributes objc-protocol-definition The simple-asm-expr and attributes are GNU extensions. This function does not handle __extension__; that is handled in its callers. ??? Following the old parser, __extension__ may start external declarations, declarations in functions and declarations at the start of "for" loops, but not old-style parameter declarations. C99 requires declaration specifiers in a function definition; the absence is diagnosed through the diagnosis of implicit int. In GNU C we also allow but diagnose declarations without declaration specifiers, but only at top level (elsewhere they conflict with other syntax). In Objective-C, declarations of the looping variable in a foreach statement are exceptionally terminated by 'in' (for example, 'for (NSObject *object in array) { ... }'). 
OpenMP: declaration: threadprivate-directive GIMPLE: gimple-function-definition: declaration-specifiers[opt] __GIMPLE (gimple-or-rtl-pass-list) declarator declaration-list[opt] compound-statement rtl-function-definition: declaration-specifiers[opt] __RTL (gimple-or-rtl-pass-list) declarator declaration-list[opt] compound-statement */ static void c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool static_assert_ok, bool empty_ok, bool nested, bool start_attr_ok, tree *objc_foreach_object_declaration, vec<c_token> omp_declare_simd_clauses, struct oacc_routine_data *oacc_routine_data, bool *fallthru_attr_p) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; bool diagnosed_no_specs = false; location_t here = c_parser_peek_token (parser)->location; add_debug_begin_stmt (c_parser_peek_token (parser)->location); if (static_assert_ok && c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT)) { c_parser_static_assert_declaration (parser); return; } specs = build_null_declspecs (); /* Try to detect an unknown type name when we have "A B" or "A *B". */ if (c_parser_peek_token (parser)->type == CPP_NAME && c_parser_peek_token (parser)->id_kind == C_ID_ID && (c_parser_peek_2nd_token (parser)->type == CPP_NAME || c_parser_peek_2nd_token (parser)->type == CPP_MULT) && (!nested || !lookup_name (c_parser_peek_token (parser)->value))) { tree name = c_parser_peek_token (parser)->value; /* Issue a warning about NAME being an unknown type name, perhaps with some kind of hint. If the user forgot a "struct" etc, suggest inserting it. Otherwise, attempt to look for misspellings. */ gcc_rich_location richloc (here); if (tag_exists_p (RECORD_TYPE, name)) { /* This is not C++ with its implicit typedef. */ richloc.add_fixit_insert_before ("struct "); error_at (&richloc, "unknown type name %qE;" " use %<struct%> keyword to refer to the type", name); } else if (tag_exists_p (UNION_TYPE, name)) { richloc.add_fixit_insert_before ("union "); error_at (&richloc, "unknown type name %qE;" " use %<union%> keyword to refer to the type", name); } else if (tag_exists_p (ENUMERAL_TYPE, name)) { richloc.add_fixit_insert_before ("enum "); error_at (&richloc, "unknown type name %qE;" " use %<enum%> keyword to refer to the type", name); } else { name_hint hint = lookup_name_fuzzy (name, FUZZY_LOOKUP_TYPENAME, here); if (hint) { richloc.add_fixit_replace (hint.suggestion ()); error_at (&richloc, "unknown type name %qE; did you mean %qs?", name, hint.suggestion ()); } else error_at (here, "unknown type name %qE", name); } /* Parse declspecs normally to get a correct pointer type, but avoid a further "fails to be a type name" error. Refuse nested functions since it is not how the user likely wants us to recover. 
*/ c_parser_peek_token (parser)->type = CPP_KEYWORD; c_parser_peek_token (parser)->keyword = RID_VOID; c_parser_peek_token (parser)->value = error_mark_node; fndef_ok = !nested; } c_parser_declspecs (parser, specs, true, true, start_attr_ok, true, true, cla_nonabstract_decl); if (parser->error) { c_parser_skip_to_end_of_block_or_statement (parser); return; } if (nested && !specs->declspecs_seen_p) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_end_of_block_or_statement (parser); return; } finish_declspecs (specs); bool auto_type_p = specs->typespec_word == cts_auto_type; if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { if (auto_type_p) error_at (here, "%<__auto_type%> in empty declaration"); else if (specs->typespec_kind == ctsk_none && attribute_fallthrough_p (specs->attrs)) { if (fallthru_attr_p != NULL) *fallthru_attr_p = true; tree fn = build_call_expr_internal_loc (here, IFN_FALLTHROUGH, void_type_node, 0); add_stmt (fn); } else if (empty_ok) shadow_tag (specs); else { shadow_tag_warned (specs, 1); pedwarn (here, 0, "empty declaration"); } c_parser_consume_token (parser); if (oacc_routine_data) c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false); return; } /* Provide better error recovery. Note that a type name here is usually better diagnosed as a redeclaration. */ if (empty_ok && specs->typespec_kind == ctsk_tagdef && c_parser_next_token_starts_declspecs (parser) && !c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<;%>, identifier or %<(%>"); parser->error = false; shadow_tag_warned (specs, 1); return; } else if (c_dialect_objc () && !auto_type_p) { /* Prefix attributes are an error on method decls. */ switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: case CPP_MINUS: if (c_parser_objc_diagnose_bad_element_prefix (parser, specs)) return; if (specs->attrs) { warning_at (c_parser_peek_token (parser)->location, OPT_Wattributes, "prefix attributes are ignored for methods"); specs->attrs = NULL_TREE; } if (fndef_ok) c_parser_objc_method_definition (parser); else c_parser_objc_methodproto (parser); return; break; default: break; } /* This is where we parse 'attributes @interface ...', 'attributes @implementation ...', 'attributes @protocol ...' (where attributes could be, for example, __attribute__ ((deprecated)). 
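   For illustration (a hypothetical Objective-C example): "__attribute__ ((deprecated)) @interface Foo : Base @end".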
*/ switch (c_parser_peek_token (parser)->keyword) { case RID_AT_INTERFACE: { if (c_parser_objc_diagnose_bad_element_prefix (parser, specs)) return; c_parser_objc_class_definition (parser, specs->attrs); return; } break; case RID_AT_IMPLEMENTATION: { if (c_parser_objc_diagnose_bad_element_prefix (parser, specs)) return; if (specs->attrs) { warning_at (c_parser_peek_token (parser)->location, OPT_Wattributes, "prefix attributes are ignored for implementations"); specs->attrs = NULL_TREE; } c_parser_objc_class_definition (parser, NULL_TREE); return; } break; case RID_AT_PROTOCOL: { if (c_parser_objc_diagnose_bad_element_prefix (parser, specs)) return; c_parser_objc_protocol_definition (parser, specs->attrs); return; } break; case RID_AT_ALIAS: case RID_AT_CLASS: case RID_AT_END: case RID_AT_PROPERTY: if (specs->attrs) { c_parser_error (parser, "unexpected attribute"); specs->attrs = NULL; } break; default: break; } } else if (attribute_fallthrough_p (specs->attrs)) warning_at (here, OPT_Wattributes, "%<fallthrough%> attribute not followed by %<;%>"); pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; while (true) { struct c_declarator *declarator; bool dummy = false; timevar_id_t tv; tree fnbody = NULL_TREE; /* Declaring either one or more declarators (in which case we should diagnose if there were no declaration specifiers) or a function definition (in which case the diagnostic for implicit int suffices). */ declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_NORMAL, &dummy); if (declarator == NULL) { if (omp_declare_simd_clauses.exists ()) c_finish_omp_declare_simd (parser, NULL_TREE, NULL_TREE, omp_declare_simd_clauses); if (oacc_routine_data) c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false); c_parser_skip_to_end_of_block_or_statement (parser); return; } if (auto_type_p && declarator->kind != cdk_id) { error_at (here, "%<__auto_type%> requires a plain identifier" " as declarator"); c_parser_skip_to_end_of_block_or_statement (parser); return; } if (c_parser_next_token_is (parser, CPP_EQ) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is_keyword (parser, RID_ASM) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE) || c_parser_next_token_is_keyword (parser, RID_IN)) { tree asm_name = NULL_TREE; tree postfix_attrs = NULL_TREE; if (!diagnosed_no_specs && !specs->declspecs_seen_p) { diagnosed_no_specs = true; pedwarn (here, 0, "data definition has no type or storage class"); } /* Having seen a data definition, there cannot now be a function definition. */ fndef_ok = false; if (c_parser_next_token_is_keyword (parser, RID_ASM)) asm_name = c_parser_simple_asm_expr (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { postfix_attrs = c_parser_attributes (parser); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* This means there is an attribute specifier after the declarator in a function definition. Provide some more information for the user. 
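   For illustration (hypothetical example): "int f (void) __attribute__ ((cold)) { return 0; }" is rejected here; the attribute must come before the declarator, as in "__attribute__ ((cold)) int f (void) { return 0; }".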
*/ error_at (here, "attributes should be specified before the " "declarator in a function definition"); c_parser_skip_to_end_of_block_or_statement (parser); return; } } if (c_parser_next_token_is (parser, CPP_EQ)) { tree d; struct c_expr init; location_t init_loc; c_parser_consume_token (parser); if (auto_type_p) { init_loc = c_parser_peek_token (parser)->location; rich_location richloc (line_table, init_loc); start_init (NULL_TREE, asm_name, global_bindings_p (), &richloc); /* A parameter is initialized, which is invalid. Don't attempt to instrument the initializer. */ int flag_sanitize_save = flag_sanitize; if (nested && !empty_ok) flag_sanitize = 0; init = c_parser_expr_no_commas (parser, NULL); flag_sanitize = flag_sanitize_save; if (TREE_CODE (init.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (init.value, 1))) error_at (here, "%<__auto_type%> used with a bit-field" " initializer"); init = convert_lvalue_to_rvalue (init_loc, init, true, true); tree init_type = TREE_TYPE (init.value); /* As with typeof, remove all qualifiers from atomic types. */ if (init_type != error_mark_node && TYPE_ATOMIC (init_type)) init_type = c_build_qualified_type (init_type, TYPE_UNQUALIFIED); bool vm_type = variably_modified_type_p (init_type, NULL_TREE); if (vm_type) init.value = save_expr (init.value); finish_init (); specs->typespec_kind = ctsk_typeof; specs->locations[cdw_typedef] = init_loc; specs->typedef_p = true; specs->type = init_type; if (vm_type) { bool maybe_const = true; tree type_expr = c_fully_fold (init.value, false, &maybe_const); specs->expr_const_operands &= maybe_const; if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (type_expr), specs->expr, type_expr); else specs->expr = type_expr; } d = start_decl (declarator, specs, true, chainon (postfix_attrs, all_prefix_attrs)); if (!d) d = error_mark_node; if (omp_declare_simd_clauses.exists ()) c_finish_omp_declare_simd (parser, d, NULL_TREE, omp_declare_simd_clauses); } else { /* The declaration of the variable is in effect while its initializer is parsed. */ d = start_decl (declarator, specs, true, chainon (postfix_attrs, all_prefix_attrs)); if (!d) d = error_mark_node; if (omp_declare_simd_clauses.exists ()) c_finish_omp_declare_simd (parser, d, NULL_TREE, omp_declare_simd_clauses); init_loc = c_parser_peek_token (parser)->location; rich_location richloc (line_table, init_loc); start_init (d, asm_name, global_bindings_p (), &richloc); /* A parameter is initialized, which is invalid. Don't attempt to instrument the initializer. 
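   For illustration (hypothetical example): an old-style parameter declaration with an initializer, such as "void f (x) int x = 0; { }", reaches this path with a PARM_DECL; the initializer is invalid and is therefore not instrumented.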
*/ int flag_sanitize_save = flag_sanitize; if (TREE_CODE (d) == PARM_DECL) flag_sanitize = 0; init = c_parser_initializer (parser); flag_sanitize = flag_sanitize_save; finish_init (); } if (oacc_routine_data) c_finish_oacc_routine (oacc_routine_data, d, false); if (d != error_mark_node) { maybe_warn_string_init (init_loc, TREE_TYPE (d), init); finish_decl (d, init_loc, init.value, init.original_type, asm_name); } } else { if (auto_type_p) { error_at (here, "%<__auto_type%> requires an initialized " "data declaration"); c_parser_skip_to_end_of_block_or_statement (parser); return; } tree d = start_decl (declarator, specs, false, chainon (postfix_attrs, all_prefix_attrs)); if (d && TREE_CODE (d) == FUNCTION_DECL) if (declarator->kind == cdk_function) if (DECL_ARGUMENTS (d) == NULL_TREE) DECL_ARGUMENTS (d) = declarator->u.arg_info->parms; if (omp_declare_simd_clauses.exists ()) { tree parms = NULL_TREE; if (d && TREE_CODE (d) == FUNCTION_DECL) { struct c_declarator *ce = declarator; while (ce != NULL) if (ce->kind == cdk_function) { parms = ce->u.arg_info->parms; break; } else ce = ce->declarator; } if (parms) temp_store_parm_decls (d, parms); c_finish_omp_declare_simd (parser, d, parms, omp_declare_simd_clauses); if (parms) temp_pop_parm_decls (); } if (oacc_routine_data) c_finish_oacc_routine (oacc_routine_data, d, false); if (d) finish_decl (d, UNKNOWN_LOCATION, NULL_TREE, NULL_TREE, asm_name); if (c_parser_next_token_is_keyword (parser, RID_IN)) { if (d) *objc_foreach_object_declaration = d; else *objc_foreach_object_declaration = error_mark_node; } } if (c_parser_next_token_is (parser, CPP_COMMA)) { if (auto_type_p) { error_at (here, "%<__auto_type%> may only be used with" " a single declarator"); c_parser_skip_to_end_of_block_or_statement (parser); return; } c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; continue; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); return; } else if (c_parser_next_token_is_keyword (parser, RID_IN)) { /* This can only happen in Objective-C: we found the 'in' that terminates the declaration inside an Objective-C foreach statement. Do not consume the token, so that the caller can use it to determine that this indeed is a foreach context. */ return; } else { c_parser_error (parser, "expected %<,%> or %<;%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } } else if (auto_type_p) { error_at (here, "%<__auto_type%> requires an initialized data declaration"); c_parser_skip_to_end_of_block_or_statement (parser); return; } else if (!fndef_ok) { c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, " "%<asm%> or %<__attribute__%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } /* Function definition (nested or otherwise). */ if (nested) { pedwarn (here, OPT_Wpedantic, "ISO C forbids nested functions"); c_push_function_context (); } if (!start_function (specs, declarator, all_prefix_attrs)) { /* At this point we've consumed: declaration-specifiers declarator and the next token isn't CPP_EQ, CPP_COMMA, CPP_SEMICOLON, RID_ASM, RID_ATTRIBUTE, or RID_IN, but the declaration-specifiers declarator aren't grokkable as a function definition, so we have an error. 
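   For illustration (hypothetical example): "int a int b;" ends up here after "int a"; since "int b;" starts declaration specifiers, the code below suggests the missing semicolon with a fix-it hint.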
*/ gcc_assert (!c_parser_next_token_is (parser, CPP_SEMICOLON)); if (c_parser_next_token_starts_declspecs (parser)) { /* If we have declaration-specifiers declarator decl-specs then assume we have a missing semicolon, which would give us: declaration-specifiers declarator decl-specs ^ ; <~~~~~~~~~ declaration ~~~~~~~~~~> Use c_parser_require to get an error with a fix-it hint. */ c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"); parser->error = false; } else { /* This can appear in many cases looking nothing like a function definition, so we don't give a more specific error suggesting there was one. */ c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> " "or %<__attribute__%>"); } if (nested) c_pop_function_context (); break; } if (DECL_DECLARED_INLINE_P (current_function_decl)) tv = TV_PARSE_INLINE; else tv = TV_PARSE_FUNC; auto_timevar at (g_timer, tv); /* Parse old-style parameter declarations. ??? Attributes are not allowed to start declaration specifiers here because of a syntax conflict between a function declaration with attribute suffix and a function definition with an attribute prefix on first old-style parameter declaration. Following the old parser, they are not accepted on subsequent old-style parameter declarations either. However, there is no ambiguity after the first declaration, nor indeed on the first as long as we don't allow postfix attributes after a declarator with a nonempty identifier list in a definition; and postfix attributes have never been accepted here in function definitions either. */ while (c_parser_next_token_is_not (parser, CPP_EOF) && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE)) c_parser_declaration_or_fndef (parser, false, false, false, true, false, NULL, vNULL); store_parm_decls (); if (omp_declare_simd_clauses.exists ()) c_finish_omp_declare_simd (parser, current_function_decl, NULL_TREE, omp_declare_simd_clauses); if (oacc_routine_data) c_finish_oacc_routine (oacc_routine_data, current_function_decl, true); DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus = c_parser_peek_token (parser)->location; /* If the definition was marked with __GIMPLE then parse the function body as GIMPLE. */ if (specs->gimple_p) { cfun->pass_startwith = specs->gimple_or_rtl_pass; bool saved = in_late_binary_op; in_late_binary_op = true; c_parser_parse_gimple_body (parser); in_late_binary_op = saved; } /* Similarly, if it was marked with __RTL, use the RTL parser now, consuming the function body. */ else if (specs->rtl_p) { c_parser_parse_rtl_body (parser, specs->gimple_or_rtl_pass); /* Normally, store_parm_decls sets next_is_function_body, anticipating a function body. We need a push_scope/pop_scope pair to flush out this state, or subsequent function parsing will go wrong. */ push_scope (); pop_scope (); finish_function (); return; } else fnbody = c_parser_compound_statement (parser); tree fndecl = current_function_decl; if (nested) { tree decl = current_function_decl; /* Mark nested functions as needing static-chain initially. lower_nested_functions will recompute it but the DECL_STATIC_CHAIN flag is also used before that happens, by initializer_constant_valid_p. See gcc.dg/nested-fn-2.c. */ DECL_STATIC_CHAIN (decl) = 1; add_stmt (fnbody); finish_function (); c_pop_function_context (); add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } else { if (fnbody) add_stmt (fnbody); finish_function (); } /* Get rid of the empty stmt list for GIMPLE. 
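   For illustration (a hypothetical example, assuming the GIMPLE front end syntax shown in the grammar above): a definition such as "int __GIMPLE (ssa) square (int x) { ... }" has its body parsed by the GIMPLE parser rather than the C parser.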
*/ if (specs->gimple_p) DECL_SAVED_TREE (fndecl) = NULL_TREE; break; } } /* Parse an asm-definition (asm() outside a function body). This is a GNU extension. asm-definition: simple-asm-expr ; */ static void c_parser_asm_definition (c_parser *parser) { tree asm_str = c_parser_simple_asm_expr (parser); if (asm_str) symtab->finalize_toplevel_asm (asm_str); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse a static assertion (C11 6.7.10). static_assert-declaration: static_assert-declaration-no-semi ; */ static void c_parser_static_assert_declaration (c_parser *parser) { c_parser_static_assert_declaration_no_semi (parser); if (parser->error || !c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); } /* Parse a static assertion (C11 6.7.10), without the trailing semicolon. static_assert-declaration-no-semi: _Static_assert ( constant-expression , string-literal ) */ static void c_parser_static_assert_declaration_no_semi (c_parser *parser) { location_t assert_loc, value_loc; tree value; tree string; gcc_assert (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT)); assert_loc = c_parser_peek_token (parser)->location; if (flag_isoc99) pedwarn_c99 (assert_loc, OPT_Wpedantic, "ISO C99 does not support %<_Static_assert%>"); else pedwarn_c99 (assert_loc, OPT_Wpedantic, "ISO C90 does not support %<_Static_assert%>"); c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) return; location_t value_tok_loc = c_parser_peek_token (parser)->location; value = c_parser_expr_no_commas (parser, NULL).value; value_loc = EXPR_LOC_OR_LOC (value, value_tok_loc); parser->lex_untranslated_string = true; if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { parser->lex_untranslated_string = false; return; } switch (c_parser_peek_token (parser)->type) { case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: string = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); parser->lex_untranslated_string = false; break; default: c_parser_error (parser, "expected string literal"); parser->lex_untranslated_string = false; return; } parens.require_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (value))) { error_at (value_loc, "expression in static assertion is not an integer"); return; } if (TREE_CODE (value) != INTEGER_CST) { value = c_fully_fold (value, false, NULL); /* Strip no-op conversions. */ STRIP_TYPE_NOPS (value); if (TREE_CODE (value) == INTEGER_CST) pedwarn (value_loc, OPT_Wpedantic, "expression in static assertion " "is not an integer constant expression"); } if (TREE_CODE (value) != INTEGER_CST) { error_at (value_loc, "expression in static assertion is not constant"); return; } constant_expression_warning (value); if (integer_zerop (value)) error_at (assert_loc, "static assertion failed: %E", string); } /* Parse some declaration specifiers (possibly none) (C90 6.5, C99 6.7, C11 6.7), adding them to SPECS (which may already include some). Storage class specifiers are accepted iff SCSPEC_OK; type specifiers are accepted iff TYPESPEC_OK; alignment specifiers are accepted iff ALIGNSPEC_OK; attributes are accepted at the start iff START_ATTR_OK; __auto_type is accepted iff AUTO_TYPE_OK. 
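   For illustration (hypothetical examples): declarations such as "static _Thread_local unsigned long hits;" or "_Alignas (16) char buf[64];" are accepted by this function when the corresponding *_OK flags are set.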
declaration-specifiers: storage-class-specifier declaration-specifiers[opt] type-specifier declaration-specifiers[opt] type-qualifier declaration-specifiers[opt] function-specifier declaration-specifiers[opt] alignment-specifier declaration-specifiers[opt] Function specifiers (inline) are from C99, and are currently handled as storage class specifiers, as is __thread. Alignment specifiers are from C11. C90 6.5.1, C99 6.7.1, C11 6.7.1: storage-class-specifier: typedef extern static auto register _Thread_local (_Thread_local is new in C11.) C99 6.7.4, C11 6.7.4: function-specifier: inline _Noreturn (_Noreturn is new in C11.) C90 6.5.2, C99 6.7.2, C11 6.7.2: type-specifier: void char short int long float double signed unsigned _Bool _Complex [_Imaginary removed in C99 TC2] struct-or-union-specifier enum-specifier typedef-name atomic-type-specifier (_Bool and _Complex are new in C99.) (atomic-type-specifier is new in C11.) C90 6.5.3, C99 6.7.3, C11 6.7.3: type-qualifier: const restrict volatile address-space-qualifier _Atomic (restrict is new in C99.) (_Atomic is new in C11.) GNU extensions: declaration-specifiers: attributes declaration-specifiers[opt] type-qualifier: address-space address-space: identifier recognized by the target storage-class-specifier: __thread type-specifier: typeof-specifier __auto_type __intN _Decimal32 _Decimal64 _Decimal128 _Fract _Accum _Sat (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf) atomic-type-specifier _Atomic ( type-name ) Objective-C: type-specifier: class-name objc-protocol-refs[opt] typedef-name objc-protocol-refs objc-protocol-refs */ void c_parser_declspecs (c_parser *parser, struct c_declspecs *specs, bool scspec_ok, bool typespec_ok, bool start_attr_ok, bool alignspec_ok, bool auto_type_ok, enum c_lookahead_kind la) { bool attrs_ok = start_attr_ok; bool seen_type = specs->typespec_kind != ctsk_none; if (!typespec_ok) gcc_assert (la == cla_prefer_id); while (c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD) || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS))) { struct c_typespec t; tree attrs; tree align; location_t loc = c_parser_peek_token (parser)->location; /* If we cannot accept a type, exit if the next token must start one. Also, if we already have seen a tagged definition, a typename would be an error anyway and likely the user has simply forgotten a semicolon, so we exit. */ if ((!typespec_ok || specs->typespec_kind == ctsk_tagdef) && c_parser_next_tokens_start_typename (parser, la) && !c_parser_next_token_is_qualifier (parser) && !c_parser_next_token_is_keyword (parser, RID_ALIGNAS)) break; if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *name_token = c_parser_peek_token (parser); tree value = name_token->value; c_id_kind kind = name_token->id_kind; if (kind == C_ID_ADDRSPACE) { addr_space_t as = name_token->keyword - RID_FIRST_ADDR_SPACE; declspecs_add_addrspace (name_token->location, specs, as); c_parser_consume_token (parser); attrs_ok = true; continue; } gcc_assert (!c_parser_next_token_is_qualifier (parser)); /* If we cannot accept a type, and the next token must start one, exit. Do the same if we already have seen a tagged definition, since it would be an error anyway and likely the user has simply forgotten a semicolon. */ if (seen_type || !c_parser_next_tokens_start_typename (parser, la)) break; /* Now at an unknown typename (C_ID_ID), a C_ID_TYPENAME or a C_ID_CLASSNAME. 
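   For illustration (hypothetical examples): "foo x;" with no such type reaches the C_ID_ID case below and is diagnosed, while "size_t n;" after <stddef.h> reaches the C_ID_TYPENAME case and records the typedef meaning rather than the name.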
*/ c_parser_consume_token (parser); seen_type = true; attrs_ok = true; if (kind == C_ID_ID) { error_at (loc, "unknown type name %qE", value); t.kind = ctsk_typedef; t.spec = error_mark_node; } else if (kind == C_ID_TYPENAME && (!c_dialect_objc () || c_parser_next_token_is_not (parser, CPP_LESS))) { t.kind = ctsk_typedef; /* For a typedef name, record the meaning, not the name. In case of 'foo foo, bar;'. */ t.spec = lookup_name (value); } else { tree proto = NULL_TREE; gcc_assert (c_dialect_objc ()); t.kind = ctsk_objc; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); t.spec = objc_get_protocol_qualified_type (value, proto); } t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (name_token->location, specs, t); continue; } if (c_parser_next_token_is (parser, CPP_LESS)) { /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" - nisse@lysator.liu.se. */ tree proto; gcc_assert (c_dialect_objc ()); if (!typespec_ok || seen_type) break; proto = c_parser_objc_protocol_refs (parser); t.kind = ctsk_objc; t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto); t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); continue; } gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD)); switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_NORETURN: case RID_AUTO: case RID_THREAD: if (!scspec_ok) goto out; attrs_ok = true; /* TODO: Distinguish between function specifiers (inline, noreturn) and storage class specifiers, either here or in declspecs_add_scspec. */ declspecs_add_scspec (loc, specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_AUTO_TYPE: if (!auto_type_ok) goto out; /* Fall through. */ case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: CASE_RID_FLOATN_NX: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; if (c_dialect_objc ()) parser->objc_need_raw_identifier = true; t.kind = ctsk_resword; t.spec = c_parser_peek_token (parser)->value; t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); c_parser_consume_token (parser); break; case RID_ENUM: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_enum_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_STRUCT: case RID_UNION: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_struct_or_union_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_TYPEOF: /* ??? The old parser rejected typeof after other type specifiers, but is a syntax error the best way of handling this? 
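   For illustration (hypothetical example): in "unsigned typeof (n) x;" the typeof is seen after another type specifier, so the check below stops declaration-specifier parsing and an error follows.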
*/ if (!typespec_ok || seen_type) goto out; attrs_ok = true; seen_type = true; t = c_parser_typeof_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_ATOMIC: /* C parser handling of Objective-C constructs needs checking for correct lvalue-to-rvalue conversions, and the code in build_modify_expr handling various Objective-C cases, and that in build_unary_op handling Objective-C cases for increment / decrement, also needs updating; uses of TYPE_MAIN_VARIANT in objc_compare_types and objc_types_are_equivalent may also need updates. */ if (c_dialect_objc ()) sorry ("%<_Atomic%> in Objective-C"); if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support the %<_Atomic%> qualifier"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support the %<_Atomic%> qualifier"); attrs_ok = true; tree value; value = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (typespec_ok && c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { /* _Atomic ( type-name ). */ seen_type = true; c_parser_consume_token (parser); struct c_type_name *type = c_parser_type_name (parser); t.kind = ctsk_typeof; t.spec = error_mark_node; t.expr = NULL_TREE; t.expr_const_operands = true; if (type != NULL) t.spec = groktypename (type, &t.expr, &t.expr_const_operands); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t.spec != error_mark_node) { if (TREE_CODE (t.spec) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); else if (TREE_CODE (t.spec) == FUNCTION_TYPE) error_at (loc, "%<_Atomic%>-qualified function type"); else if (TYPE_QUALS (t.spec) != TYPE_UNQUALIFIED) error_at (loc, "%<_Atomic%> applied to a qualified type"); else t.spec = c_build_qualified_type (t.spec, TYPE_QUAL_ATOMIC); } declspecs_add_type (loc, specs, t); } else declspecs_add_qual (loc, specs, value); break; case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: attrs_ok = true; declspecs_add_qual (loc, specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_ATTRIBUTE: if (!attrs_ok) goto out; attrs = c_parser_attributes (parser); declspecs_add_attrs (loc, specs, attrs); break; case RID_ALIGNAS: if (!alignspec_ok) goto out; align = c_parser_alignas_specifier (parser); declspecs_add_alignas (loc, specs, align); break; case RID_GIMPLE: if (! flag_gimple) error_at (loc, "%<__GIMPLE%> only valid with -fgimple"); c_parser_consume_token (parser); specs->gimple_p = true; specs->locations[cdw_gimple] = loc; specs->gimple_or_rtl_pass = c_parser_gimple_or_rtl_pass_list (parser); break; case RID_RTL: c_parser_consume_token (parser); specs->rtl_p = true; specs->locations[cdw_rtl] = loc; specs->gimple_or_rtl_pass = c_parser_gimple_or_rtl_pass_list (parser); break; default: goto out; } } out: ; } /* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2, C11 6.7.2.2). enum-specifier: enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt] enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt] enum attributes[opt] identifier The form with trailing comma is new in C99. The forms with attributes are GNU extensions. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
enumerator-list: enumerator enumerator-list , enumerator enumerator: enumeration-constant enumeration-constant = constant-expression GNU Extensions: enumerator: enumeration-constant attributes[opt] enumeration-constant attributes[opt] = constant-expression */ static struct c_typespec c_parser_enum_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t enum_loc; location_t ident_loc = UNKNOWN_LOCATION; /* Quiet warning. */ gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM)); c_parser_consume_token (parser); attrs = c_parser_attributes (parser); enum_loc = c_parser_peek_token (parser)->location; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; enum_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse an enum definition. */ struct c_enum_contents the_enum; tree type; tree postfix_attrs; /* We chain the enumerators in reverse order, then put them in forward order at the end. */ tree values; timevar_push (TV_PARSE_ENUM); type = start_enum (enum_loc, &the_enum, ident); values = NULL_TREE; c_parser_consume_token (parser); while (true) { tree enum_id; tree enum_value; tree enum_decl; bool seen_comma; c_token *token; location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t decl_loc, value_loc; if (c_parser_next_token_is_not (parser, CPP_NAME)) { /* Give a nicer error for "enum {}". */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE) && !parser->error) { error_at (c_parser_peek_token (parser)->location, "empty enum is invalid"); parser->error = true; } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } token = c_parser_peek_token (parser); enum_id = token->value; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (token); decl_loc = value_loc = token->location; c_parser_consume_token (parser); /* Parse any specified attributes. 
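   For illustration (a hypothetical example using the GNU enumerator attribute extension from the grammar above): "enum e { LEGACY __attribute__ ((deprecated)) = 1, CURRENT, };".  The trailing comma is accepted here and pedwarned for C90 below.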
*/ tree enum_attrs = c_parser_attributes (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { c_parser_consume_token (parser); value_loc = c_parser_peek_token (parser)->location; enum_value = c_parser_expr_no_commas (parser, NULL).value; } else enum_value = NULL_TREE; enum_decl = build_enumerator (decl_loc, value_loc, &the_enum, enum_id, enum_value); if (enum_attrs) decl_attributes (&TREE_PURPOSE (enum_decl), enum_attrs, 0); TREE_CHAIN (enum_decl) = values; values = enum_decl; seen_comma = false; if (c_parser_next_token_is (parser, CPP_COMMA)) { comma_loc = c_parser_peek_token (parser)->location; seen_comma = true; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { if (seen_comma) pedwarn_c90 (comma_loc, OPT_Wpedantic, "comma at end of enumerator list"); c_parser_consume_token (parser); break; } if (!seen_comma) { c_parser_error (parser, "expected %<,%> or %<}%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_enum (type, nreverse (values), chainon (attrs, postfix_attrs)); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; timevar_pop (TV_PARSE_ENUM); return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident); /* In ISO C, enumerated types can be referred to only if already defined. */ if (pedantic && !COMPLETE_TYPE_P (ret.spec)) { gcc_assert (ident); pedwarn (enum_loc, OPT_Wpedantic, "ISO C forbids forward references to %<enum%> types"); } return ret; } /* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1). struct-or-union-specifier: struct-or-union attributes[opt] identifier[opt] { struct-contents } attributes[opt] struct-or-union attributes[opt] identifier struct-contents: struct-declaration-list struct-declaration-list: struct-declaration ; struct-declaration-list struct-declaration ; GNU extensions: struct-contents: empty struct-declaration struct-declaration-list struct-declaration struct-declaration-list: struct-declaration-list ; ; (Note that in the syntax here, unlike that in ISO C, the semicolons are included here rather than in struct-declaration, in order to describe the syntax with extra semicolons and missing semicolon at end.) Objective-C: struct-declaration-list: @defs ( class-name ) (Note this does not include a trailing semicolon, but can be followed by further declarations, and gets a pedwarn-if-pedantic when followed by a semicolon.) */ static struct c_typespec c_parser_struct_or_union_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t struct_loc; location_t ident_loc = UNKNOWN_LOCATION; enum tree_code code; switch (c_parser_peek_token (parser)->keyword) { case RID_STRUCT: code = RECORD_TYPE; break; case RID_UNION: code = UNION_TYPE; break; default: gcc_unreachable (); } struct_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); /* Set the location in case we create a decl now. 
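   For illustration (hypothetical examples): "struct point { int x, y; };" defines a tag below, while a bare "struct node" reference is handled at the end via parser_xref_tag.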
*/ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; struct_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse a struct or union definition. Start the scope of the tag before parsing components. */ struct c_struct_parse_info *struct_info; tree type = start_struct (struct_loc, code, ident, &struct_info); tree postfix_attrs; /* We chain the components in reverse order, then put them in forward order at the end. Each struct-declaration may declare multiple components (comma-separated), so we must use chainon to join them, although when parsing each struct-declaration we can use TREE_CHAIN directly. The theory behind all this is that there will be more semicolon separated fields than comma separated fields, and so we'll be minimizing the number of node traversals required by chainon. */ tree contents; timevar_push (TV_PARSE_STRUCT); contents = NULL_TREE; c_parser_consume_token (parser); /* Handle the Objective-C @defs construct, e.g. foo(sizeof(struct{ @defs(ClassName) }));. */ if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS)) { tree name; gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) goto end_at_defs; if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else { c_parser_error (parser, "expected class name"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto end_at_defs; } parens.skip_until_found_close (parser); contents = nreverse (objc_get_class_ivars (name)); } end_at_defs: /* Parse the struct-declarations and semicolons. Problems with semicolons are diagnosed here; empty structures are diagnosed elsewhere. */ while (true) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t semicolon_loc = c_parser_peek_token (parser)->location; gcc_rich_location richloc (semicolon_loc); richloc.add_fixit_remove (); pedwarn (&richloc, OPT_Wpedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the struct or union contents. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Accept #pragmas at struct scope. */ if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_struct, NULL); continue; } /* Parse some comma-separated declarations, but not the trailing semicolon if any. */ decls = c_parser_struct_declaration (parser); contents = chainon (decls, contents); /* If no semicolon follows, either we have a parse error or are at the end of the struct or union and should pedwarn. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) pedwarn (c_parser_peek_token (parser)->location, 0, "no semicolon at end of struct or union"); else if (parser->error || !c_parser_next_token_starts_declspecs (parser)) { c_parser_error (parser, "expected %<;%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); break; } /* If we come here, we have already emitted an error for an expected `;', identifier or `(', and we also recovered already. 
Go on with the next field. */ } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_struct (struct_loc, type, nreverse (contents), chainon (attrs, postfix_attrs), struct_info); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; timevar_pop (TV_PARSE_STRUCT); return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, code, ident); return ret; } /* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1), *without* the trailing semicolon. struct-declaration: specifier-qualifier-list struct-declarator-list static_assert-declaration-no-semi specifier-qualifier-list: type-specifier specifier-qualifier-list[opt] type-qualifier specifier-qualifier-list[opt] alignment-specifier specifier-qualifier-list[opt] attributes specifier-qualifier-list[opt] struct-declarator-list: struct-declarator struct-declarator-list , attributes[opt] struct-declarator struct-declarator: declarator attributes[opt] declarator[opt] : constant-expression attributes[opt] GNU extensions: struct-declaration: __extension__ struct-declaration specifier-qualifier-list Unlike the ISO C syntax, semicolons are handled elsewhere. The use of attributes where shown is a GNU extension. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. */ static tree c_parser_struct_declaration (c_parser *parser) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; tree decls; location_t decl_loc; if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { int ext; tree decl; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); decl = c_parser_struct_declaration (parser); restore_extension_diagnostics (ext); return decl; } if (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT)) { c_parser_static_assert_declaration_no_semi (parser); return NULL_TREE; } specs = build_null_declspecs (); decl_loc = c_parser_peek_token (parser)->location; /* Strictly by the standard, we shouldn't allow _Alignas here, but it appears to have been intended to allow it there, so we're keeping it as it is until WG14 reaches a conclusion of N1731. <http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1731.pdf> */ c_parser_declspecs (parser, specs, false, true, true, true, false, cla_nonabstract_decl); if (parser->error) return NULL_TREE; if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL_TREE; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { tree ret; if (specs->typespec_kind == ctsk_none) { pedwarn (decl_loc, OPT_Wpedantic, "ISO C forbids member declarations with no members"); shadow_tag_warned (specs, pedantic); ret = NULL_TREE; } else { /* Support for unnamed structs or unions as members of structs or unions (which is [a] useful and [b] supports MS P-SDK). */ tree attrs = NULL; ret = grokfield (c_parser_peek_token (parser)->location, build_id_declarator (NULL_TREE), specs, NULL_TREE, &attrs); if (ret) decl_attributes (&ret, attrs, 0); } return ret; } /* Provide better error recovery. Note that a type name here is valid, and will be treated as a field name. 
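   For illustration (hypothetical example): in "struct s { unsigned size_t; };" the typedef name size_t is accepted here as the member name.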
*/ if (specs->typespec_kind == ctsk_tagdef && TREE_CODE (specs->type) != ENUMERAL_TYPE && c_parser_next_token_starts_declspecs (parser) && !c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<;%>, identifier or %<(%>"); parser->error = false; return NULL_TREE; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; decls = NULL_TREE; while (true) { /* Declaring one or more declarators or un-named bit-fields. */ struct c_declarator *declarator; bool dummy = false; if (c_parser_next_token_is (parser, CPP_COLON)) declarator = build_id_declarator (NULL_TREE); else declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); break; } if (c_parser_next_token_is (parser, CPP_COLON) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree postfix_attrs = NULL_TREE; tree width = NULL_TREE; tree d; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); width = c_parser_expr_no_commas (parser, NULL).value; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); d = grokfield (c_parser_peek_token (parser)->location, declarator, specs, width, &all_prefix_attrs); decl_attributes (&d, chainon (postfix_attrs, all_prefix_attrs), 0); DECL_CHAIN (d) = decls; decls = d; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { /* Semicolon consumed in caller. */ break; } else { c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>"); break; } } else { c_parser_error (parser, "expected %<:%>, %<,%>, %<;%>, %<}%> or " "%<__attribute__%>"); break; } } return decls; } /* Parse a typeof specifier (a GNU extension). 
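   For illustration (hypothetical examples): "typeof (p) q;" uses the expression form, and "typeof (volatile int) *r;" uses the type-name form of the grammar below.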
typeof-specifier: typeof ( expression ) typeof ( type-name ) */ static struct c_typespec c_parser_typeof_specifier (c_parser *parser) { struct c_typespec ret; ret.kind = ctsk_typeof; ret.spec = error_mark_node; ret.expr = NULL_TREE; ret.expr_const_operands = true; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_typeof++; matching_parens parens; if (!parens.require_open (parser)) { c_inhibit_evaluation_warnings--; in_typeof--; return ret; } if (c_parser_next_tokens_start_typename (parser, cla_prefer_id)) { struct c_type_name *type = c_parser_type_name (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (type != NULL) { ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands); pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE)); } } else { bool was_vm; location_t here = c_parser_peek_token (parser)->location; struct c_expr expr = c_parser_expression (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (here, "%<typeof%> applied to a bit-field"); mark_exp_read (expr.value); ret.spec = TREE_TYPE (expr.value); was_vm = variably_modified_type_p (ret.spec, NULL_TREE); /* This is returned with the type so that when the type is evaluated, this can be evaluated. */ if (was_vm) ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands); pop_maybe_used (was_vm); /* For use in macros such as those in <stdatomic.h>, remove all qualifiers from atomic types. (const can be an issue for more macros using typeof than just the <stdatomic.h> ones.) */ if (ret.spec != error_mark_node && TYPE_ATOMIC (ret.spec)) ret.spec = c_build_qualified_type (ret.spec, TYPE_UNQUALIFIED); } parens.skip_until_found_close (parser); return ret; } /* Parse an alignment-specifier. C11 6.7.5: alignment-specifier: _Alignas ( type-name ) _Alignas ( constant-expression ) */ static tree c_parser_alignas_specifier (c_parser * parser) { tree ret = error_mark_node; location_t loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNAS)); c_parser_consume_token (parser); if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Alignas%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Alignas%>"); matching_parens parens; if (!parens.require_open (parser)) return ret; if (c_parser_next_tokens_start_typename (parser, cla_prefer_id)) { struct c_type_name *type = c_parser_type_name (parser); if (type != NULL) ret = c_sizeof_or_alignof_type (loc, groktypename (type, NULL, NULL), false, true, 1); } else ret = c_parser_expr_no_commas (parser, NULL).value; parens.skip_until_found_close (parser); return ret; } /* Parse a declarator, possibly an abstract declarator (C90 6.5.4, 6.5.5, C99 6.7.5, 6.7.6, C11 6.7.6, 6.7.7). If TYPE_SEEN_P then a typedef name may be redeclared; otherwise it may not. KIND indicates which kind of declarator is wanted. Returns a valid declarator except in the case of a syntax error in which case NULL is returned. *SEEN_ID is set to true if an identifier being declared is seen; this is used to diagnose bad forms of abstract array declarators and to determine whether an identifier list is syntactically permitted. 
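   For illustration (hypothetical example): in "int *(*handler) (int, char **);" the declarator is "*(*handler) (int, char **)", combining the pointer, parenthesized and function declarator forms from the grammar below.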
declarator: pointer[opt] direct-declarator direct-declarator: identifier ( attributes[opt] declarator ) direct-declarator array-declarator direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list[opt] ) pointer: * type-qualifier-list[opt] * type-qualifier-list[opt] pointer type-qualifier-list: type-qualifier attributes type-qualifier-list type-qualifier type-qualifier-list attributes array-declarator: [ type-qualifier-list[opt] assignment-expression[opt] ] [ static type-qualifier-list[opt] assignment-expression ] [ type-qualifier-list static assignment-expression ] [ type-qualifier-list[opt] * ] parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator attributes[opt] declaration-specifiers abstract-declarator[opt] attributes[opt] identifier-list: identifier identifier-list , identifier abstract-declarator: pointer pointer[opt] direct-abstract-declarator direct-abstract-declarator: ( attributes[opt] abstract-declarator ) direct-abstract-declarator[opt] array-declarator direct-abstract-declarator[opt] ( parameter-type-list[opt] ) GNU extensions: direct-declarator: direct-declarator ( parameter-forward-declarations parameter-type-list[opt] ) direct-abstract-declarator: direct-abstract-declarator[opt] ( parameter-forward-declarations parameter-type-list[opt] ) parameter-forward-declarations: parameter-list ; parameter-forward-declarations parameter-list ; The uses of attributes shown above are GNU extensions. Some forms of array declarator are not included in C99 in the syntax for abstract declarators; these are disallowed elsewhere. This may be a defect (DR#289). This function also accepts an omitted abstract declarator as being an abstract declarator, although not part of the formal syntax. */ struct c_declarator * c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* Parse any initial pointer part. */ if (c_parser_next_token_is (parser, CPP_MULT)) { struct c_declspecs *quals_attrs = build_null_declspecs (); struct c_declarator *inner; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner == NULL) return NULL; else return make_pointer_declarator (quals_attrs, inner); } /* Now we have a direct declarator, direct abstract declarator or nothing (which counts as a direct abstract declarator here). */ return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id); } /* Parse a direct declarator or direct abstract declarator; arguments as c_parser_declarator. */ static struct c_declarator * c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* The direct declarator must start with an identifier (possibly omitted) or a parenthesized declarator (possibly abstract). In an ordinary declarator, initial parentheses must start a parenthesized declarator. In an abstract declarator or parameter declarator, they could start a parenthesized declarator or a parameter list. To tell which, the open parenthesis and any following attributes must be read. If a declaration specifier follows, then it is a parameter list; if the specifier is a typedef name, there might be an ambiguity about redeclaring it, which is resolved in the direction of treating it as a typedef name. 
If a close parenthesis follows, it is also an empty parameter list, as the syntax does not permit empty abstract declarators. Otherwise, it is a parenthesized declarator (in which case the analysis may be repeated inside it, recursively). ??? There is an ambiguity in a parameter declaration "int (__attribute__((foo)) x)", where x is not a typedef name: it could be an abstract declarator for a function, or declare x with parentheses. The proper resolution of this ambiguity needs documenting. At present we follow an accident of the old parser's implementation, whereby the first parameter must have some declaration specifiers other than just attributes. Thus as a parameter declaration it is treated as a parenthesized parameter named x, and as an abstract declarator it is rejected. ??? Also following the old parser, attributes inside an empty parameter list are ignored, making it a list not yielding a prototype, rather than giving an error or making it have one parameter with implicit type int. ??? Also following the old parser, typedef names may be redeclared in declarators, but not Objective-C class names. */ if (kind != C_DTR_ABSTRACT && c_parser_next_token_is (parser, CPP_NAME) && ((type_seen_p && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) || c_parser_peek_token (parser)->id_kind == C_ID_ID)) { struct c_declarator *inner = build_id_declarator (c_parser_peek_token (parser)->value); *seen_id = true; inner->id_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } if (kind != C_DTR_NORMAL && c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { struct c_declarator *inner = build_id_declarator (NULL_TREE); inner->id_loc = c_parser_peek_token (parser)->location; return c_parser_direct_declarator_inner (parser, *seen_id, inner); } /* Either we are at the end of an abstract declarator, or we have parentheses. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_declarator *inner; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); if (kind != C_DTR_NORMAL && (c_parser_next_token_starts_declspecs (parser) || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))) { struct c_arg_info *args = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, build_id_declarator (NULL_TREE)); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } } /* A parenthesized declarator. */ inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner != NULL && attrs != NULL) inner = build_attrs_declarator (attrs, inner); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (inner == NULL) return NULL; else return c_parser_direct_declarator_inner (parser, *seen_id, inner); } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } else { if (kind == C_DTR_NORMAL) { c_parser_error (parser, "expected identifier or %<(%>"); return NULL; } else return build_id_declarator (NULL_TREE); } } /* Parse part of a direct declarator or direct abstract declarator, given that some (in INNER) has already been parsed; ID_PRESENT is true if an identifier is present, false for an abstract declarator. 
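   For illustration (hypothetical example): after parsing "a" in "void f (int n, int a[static n], int (*g) (void));" this function consumes the "[static n]" array declarator suffix; for "g" it consumes the "(void)" parameter list suffix.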
*/ static struct c_declarator * c_parser_direct_declarator_inner (c_parser *parser, bool id_present, struct c_declarator *inner) { /* Parse a sequence of array declarators and parameter lists. */ if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { location_t brace_loc = c_parser_peek_token (parser)->location; struct c_declarator *declarator; struct c_declspecs *quals_attrs = build_null_declspecs (); bool static_seen; bool star_seen; struct c_expr dimen; dimen.value = NULL_TREE; dimen.original_code = ERROR_MARK; dimen.original_type = NULL_TREE; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC); if (static_seen) c_parser_consume_token (parser); if (static_seen && !quals_attrs->declspecs_seen_p) c_parser_declspecs (parser, quals_attrs, false, false, true, false, false, cla_prefer_id); if (!quals_attrs->declspecs_seen_p) quals_attrs = NULL; /* If "static" is present, there must be an array dimension. Otherwise, there may be a dimension, "*", or no dimension. */ if (static_seen) { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } else { if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { dimen.value = NULL_TREE; star_seen = false; } else if (c_parser_next_token_is (parser, CPP_MULT)) { if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE) { dimen.value = NULL_TREE; star_seen = true; c_parser_consume_token (parser); } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL); } } if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) c_parser_consume_token (parser); else { c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return NULL; } if (dimen.value) dimen = convert_lvalue_to_rvalue (brace_loc, dimen, true, true); declarator = build_array_declarator (brace_loc, dimen.value, quals_attrs, static_seen, star_seen); if (declarator == NULL) return NULL; inner = set_array_declarator_inner (declarator, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_arg_info *args; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); args = c_parser_parms_declarator (parser, id_present, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } } return inner; } /* Parse a parameter list or identifier list, including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. ID_LIST_OK is true if an identifier list is acceptable; such a list must not have attributes at the start. */ static struct c_arg_info * c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs) { push_scope (); declare_parm_level (); /* If the list starts with an identifier, it is an identifier list. Otherwise, it is either a prototype list or an empty list. */ if (id_list_ok && !attrs && c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID /* Look ahead to detect typos in type names. 
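   For illustration (hypothetical): "int f (a, b)" is an identifier list, but "int f (floot x)" is not, since "x" is a second name; the misspelled type name then gets a suggestion instead.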
*/ && c_parser_peek_2nd_token (parser)->type != CPP_NAME && c_parser_peek_2nd_token (parser)->type != CPP_MULT && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_SQUARE && c_parser_peek_2nd_token (parser)->type != CPP_KEYWORD) { tree list = NULL_TREE, *nextp = &list; while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { *nextp = build_tree_list (NULL_TREE, c_parser_peek_token (parser)->value); nextp = & TREE_CHAIN (*nextp); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_error (parser, "expected identifier"); break; } } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = build_arg_info (); ret->types = list; c_parser_consume_token (parser); pop_scope (); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pop_scope (); return NULL; } } else { struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs, NULL); pop_scope (); return ret; } } /* Parse a parameter list (possibly empty), including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ static struct c_arg_info * c_parser_parms_list_declarator (c_parser *parser, tree attrs, tree expr) { bool bad_parm = false; /* ??? Following the old parser, forward parameter declarations may use abstract declarators, and if no real parameter declarations follow the forward declarations then this is not diagnosed. Also note as above that attributes are ignored as the only contents of the parentheses, or as the only contents after forward declarations. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = build_arg_info (); c_parser_consume_token (parser); return ret; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { struct c_arg_info *ret = build_arg_info (); if (flag_allow_parameterless_variadic_functions) { /* F (...) is allowed. */ ret->types = NULL_TREE; } else { /* Suppress -Wold-style-definition for this case. */ ret->types = error_mark_node; error_at (c_parser_peek_token (parser)->location, "ISO C requires a named argument before %<...%>"); } c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } /* Nonempty list of parameters, either terminated with semicolon (forward declarations; recurse) or with close parenthesis (normal function) or with ", ... )" (variadic function). */ while (true) { /* Parse a parameter. 
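   For illustration (hypothetical examples): a single parameter such as "const char *name", or an abstract one such as "int (*) (void)", each followed by a comma, semicolon or closing parenthesis handled below.  */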
*/ struct c_parm *parm = c_parser_parameter_declaration (parser, attrs); attrs = NULL_TREE; if (parm == NULL) bad_parm = true; else push_parm_decl (parm, &expr); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree new_attrs; c_parser_consume_token (parser); mark_forward_parm_decls (); new_attrs = c_parser_attributes (parser); return c_parser_parms_list_declarator (parser, new_attrs, expr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (bad_parm) return NULL; else return get_parm_info (false, expr); } if (!c_parser_require (parser, CPP_COMMA, "expected %<;%>, %<,%> or %<)%>", UNKNOWN_LOCATION, false)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (bad_parm) return NULL; else return get_parm_info (true, expr); } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } } } /* Parse a parameter declaration. ATTRS are the attributes at the start of the declaration if it is the first parameter. */ static struct c_parm * c_parser_parameter_declaration (c_parser *parser, tree attrs) { struct c_declspecs *specs; struct c_declarator *declarator; tree prefix_attrs; tree postfix_attrs = NULL_TREE; bool dummy = false; /* Accept #pragmas between parameter declarations. */ while (c_parser_next_token_is (parser, CPP_PRAGMA)) c_parser_pragma (parser, pragma_param, NULL); if (!c_parser_next_token_starts_declspecs (parser)) { c_token *token = c_parser_peek_token (parser); if (parser->error) return NULL; c_parser_set_source_position_from_token (token); if (c_parser_next_tokens_start_typename (parser, cla_prefer_type)) { name_hint hint = lookup_name_fuzzy (token->value, FUZZY_LOOKUP_TYPENAME, token->location); if (hint) { gcc_rich_location richloc (token->location); richloc.add_fixit_replace (hint.suggestion ()); error_at (&richloc, "unknown type name %qE; did you mean %qs?", token->value, hint.suggestion ()); } else error_at (token->location, "unknown type name %qE", token->value); parser->error = true; } /* ??? In some Objective-C cases '...' isn't applicable so there should be a different message. */ else c_parser_error (parser, "expected declaration specifiers or %<...%>"); c_parser_skip_to_end_of_parameter (parser); return NULL; } location_t start_loc = c_parser_peek_token (parser)->location; specs = build_null_declspecs (); if (attrs) { declspecs_add_attrs (input_location, specs, attrs); attrs = NULL_TREE; } c_parser_declspecs (parser, specs, true, true, true, true, false, cla_nonabstract_decl); finish_declspecs (specs); pending_xref_error (); prefix_attrs = specs->attrs; specs->attrs = NULL_TREE; declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_PARM, &dummy); if (declarator == NULL) { c_parser_skip_until_found (parser, CPP_COMMA, NULL); return NULL; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); /* Generate a location for the parameter, ranging from the start of the initial token to the end of the final token. If we have an identifier, then use it for the caret location, e.g.
extern int callee (int one, int (*two)(int, int), float three); ~~~~~~^~~~~~~~~~~~~~ otherwise, reuse the start location for the caret location e.g.: extern int callee (int one, int (*)(int, int), float three); ^~~~~~~~~~~~~~~~~ */ location_t end_loc = parser->last_token_location; /* Find any cdk_id declarator; determine if we have an identifier. */ c_declarator *id_declarator = declarator; while (id_declarator && id_declarator->kind != cdk_id) id_declarator = id_declarator->declarator; location_t caret_loc = (id_declarator->u.id ? id_declarator->id_loc : start_loc); location_t param_loc = make_location (caret_loc, start_loc, end_loc); return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs), declarator, param_loc); } /* Parse a string literal in an asm expression. It should not be translated, and wide string literals are an error although permitted by the syntax. This is a GNU extension. asm-string-literal: string-literal ??? At present, following the old parser, the caller needs to have set lex_untranslated_string to 1. It would be better to follow the C++ parser rather than using this kludge. */ static tree c_parser_asm_string_literal (c_parser *parser) { tree str; int save_flag = warn_overlength_strings; warn_overlength_strings = 0; if (c_parser_next_token_is (parser, CPP_STRING)) { str = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_WSTRING)) { error_at (c_parser_peek_token (parser)->location, "wide string literal in %<asm%>"); str = build_string (1, ""); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected string literal"); str = NULL_TREE; } warn_overlength_strings = save_flag; return str; } /* Parse a simple asm expression. This is used in restricted contexts, where a full expression with inputs and outputs does not make sense. This is a GNU extension. simple-asm-expr: asm ( asm-string-literal ) */ static tree c_parser_simple_asm_expr (c_parser *parser) { tree str; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { parser->lex_untranslated_string = false; return NULL_TREE; } str = c_parser_asm_string_literal (parser); parser->lex_untranslated_string = false; if (!parens.require_close (parser)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } return str; } static tree c_parser_attribute_any_word (c_parser *parser) { tree attr_name = NULL_TREE; if (c_parser_next_token_is (parser, CPP_KEYWORD)) { /* ??? See comment above about what keywords are accepted here. 
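For example, '__attribute__ ((unused, const))' uses the reserved word 'const' as an attribute name; only the keywords listed in the switch below are accepted this way.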
*/ bool ok; switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_SHORT: case RID_INLINE: case RID_NORETURN: case RID_VOLATILE: case RID_SIGNED: case RID_AUTO: case RID_RESTRICT: case RID_COMPLEX: case RID_THREAD: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: CASE_RID_FLOATN_NX: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: case RID_TRANSACTION_ATOMIC: case RID_TRANSACTION_CANCEL: case RID_ATOMIC: case RID_AUTO_TYPE: case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: ok = true; break; default: ok = false; break; } if (!ok) return NULL_TREE; /* Accept __attribute__((__const)) as __attribute__((const)) etc. */ attr_name = ridpointers[(int) c_parser_peek_token (parser)->keyword]; } else if (c_parser_next_token_is (parser, CPP_NAME)) attr_name = c_parser_peek_token (parser)->value; return attr_name; } /* Parse (possibly empty) attributes. This is a GNU extension. attributes: empty attributes attribute attribute: __attribute__ ( ( attribute-list ) ) attribute-list: attrib attribute_list , attrib attrib: empty any-word any-word ( identifier ) any-word ( identifier , nonempty-expr-list ) any-word ( expr-list ) where the "identifier" must not be declared as a type, and "any-word" may be any identifier (including one declared as a type), a reserved word storage class specifier, type specifier or type qualifier. ??? This still leaves out most reserved keywords (following the old parser), shouldn't we include them, and why not allow identifiers declared as types to start the arguments? */ static tree c_parser_attributes (c_parser *parser) { tree attrs = NULL_TREE; while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; /* Consume the `__attribute__' keyword. */ c_parser_consume_token (parser); /* Look for the two `(' tokens. */ if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return attrs; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return attrs; } /* Parse the attribute list. */ while (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; vec<tree, va_gc> *expr_list; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } attr_name = c_parser_attribute_any_word (parser); if (attr_name == NULL) break; attr_name = canonicalize_attr_name (attr_name); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { attr = build_tree_list (attr_name, NULL_TREE); /* Add this attribute to the list. */ attrs = chainon (attrs, attr); /* If the next token isn't a comma, we're done. */ if (!c_parser_next_token_is (parser, CPP_COMMA)) break; continue; } c_parser_consume_token (parser); /* Parse the attribute contents. If they start with an identifier which is followed by a comma or close parenthesis, then the arguments start with that identifier; otherwise they are an expression list. In objective-c the identifier may be a classname. 
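For example, '__attribute__ ((format (printf, 1, 2)))' begins with the identifier 'printf' followed by a comma, whereas the argument of '__attribute__ ((aligned (8)))' is parsed as an expression list.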
*/ if (c_parser_next_token_is (parser, CPP_NAME) && (c_parser_peek_token (parser)->id_kind == C_ID_ID || (c_dialect_objc () && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN)) && (attribute_takes_identifier_p (attr_name) || (c_dialect_objc () && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { tree arg1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list (NULL_TREE, arg1); else { tree tree_list; c_parser_consume_token (parser); expr_list = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL); tree_list = build_tree_list_vec (expr_list); attr_args = tree_cons (NULL_TREE, arg1, tree_list); release_tree_vector (expr_list); } } else { if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else { expr_list = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL); attr_args = build_tree_list_vec (expr_list); release_tree_vector (expr_list); } } attr = build_tree_list (attr_name, attr_args); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } /* Add this attribute to the list. */ attrs = chainon (attrs, attr); /* If the next token isn't a comma, we're done. */ if (!c_parser_next_token_is (parser, CPP_COMMA)) break; } /* Look for the two `)' tokens. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } parser->lex_untranslated_string = false; } return attrs; } /* Parse a type name (C90 6.5.5, C99 6.7.6, C11 6.7.7). ALIGNAS_OK says whether alignment specifiers are OK (only in cases that might be the type name of a compound literal). type-name: specifier-qualifier-list abstract-declarator[opt] */ struct c_type_name * c_parser_type_name (c_parser *parser, bool alignas_ok) { struct c_declspecs *specs = build_null_declspecs (); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs (parser, specs, false, true, true, alignas_ok, false, cla_prefer_type); if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL; } if (specs->type != error_mark_node) { pending_xref_error (); finish_declspecs (specs); } declarator = c_parser_declarator (parser, specs->typespec_kind != ctsk_none, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW (&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* Parse an initializer (C90 6.5.7, C99 6.7.8, C11 6.7.9). initializer: assignment-expression { initializer-list } { initializer-list , } initializer-list: designation[opt] initializer initializer-list , designation[opt] initializer designation: designator-list = designator-list: designator designator-list designator designator: array-designator . 
identifier array-designator: [ constant-expression ] GNU extensions: initializer: { } designation: array-designator identifier : array-designator: [ constant-expression ... constant-expression ] Any expression without commas is accepted in the syntax for the constant-expressions, with non-constant expressions rejected later. This function is only used for top-level initializers; for nested ones, see c_parser_initval. */ static struct c_expr c_parser_initializer (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_braced_init (parser, NULL_TREE, false, NULL); else { struct c_expr ret; location_t loc = c_parser_peek_token (parser)->location; ret = c_parser_expr_no_commas (parser, NULL); if (TREE_CODE (ret.value) != STRING_CST && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR) ret = convert_lvalue_to_rvalue (loc, ret, true, true); return ret; } } /* The location of the last comma within the current initializer list, or UNKNOWN_LOCATION if not within one. */ location_t last_init_list_comma; /* Parse a braced initializer list. TYPE is the type specified for a compound literal, and NULL_TREE for other initializers and for nested braced lists. NESTED_P is true for nested braced lists, false for the list of a compound literal or the list that is the top-level initializer in a declaration. */ static struct c_expr c_parser_braced_init (c_parser *parser, tree type, bool nested_p, struct obstack *outer_obstack) { struct c_expr ret; struct obstack braced_init_obstack; location_t brace_loc = c_parser_peek_token (parser)->location; gcc_obstack_init (&braced_init_obstack); gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); matching_braces braces; braces.consume_open (parser); if (nested_p) { finish_implicit_inits (brace_loc, outer_obstack); push_init_level (brace_loc, 0, &braced_init_obstack); } else really_start_incremental_init (type); if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { pedwarn (brace_loc, OPT_Wpedantic, "ISO C forbids empty initializer braces"); } else { /* Parse a non-empty initializer list, possibly with a trailing comma. */ while (true) { c_parser_initelt (parser, &braced_init_obstack); if (parser->error) break; if (c_parser_next_token_is (parser, CPP_COMMA)) { last_init_list_comma = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); } else break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; } } c_token *next_tok = c_parser_peek_token (parser); if (next_tok->type != CPP_CLOSE_BRACE) { ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; braces.skip_until_found_close (parser); pop_init_level (brace_loc, 0, &braced_init_obstack, last_init_list_comma); obstack_free (&braced_init_obstack, NULL); return ret; } location_t close_loc = next_tok->location; c_parser_consume_token (parser); ret = pop_init_level (brace_loc, 0, &braced_init_obstack, close_loc); obstack_free (&braced_init_obstack, NULL); set_c_expr_source_range (&ret, brace_loc, close_loc); return ret; } /* Parse a nested initializer, including designators. */ static void c_parser_initelt (c_parser *parser, struct obstack * braced_init_obstack) { /* Parse any designator or designator list. A single array designator may have the subsequent "=" omitted in GNU C, but a longer list or a structure member designator may not. */ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { /* Old-style structure member designator. 
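For example, the GNU form 'struct point p = { x: 1, y: 2 };' is equivalent to the C99 form 'struct point p = { .x = 1, .y = 2 };'.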
*/ set_init_label (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value, c_parser_peek_token (parser)->location, braced_init_obstack); /* Use the colon as the error location. */ pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_Wpedantic, "obsolete use of designated initializer with %<:%>"); c_parser_consume_token (parser); c_parser_consume_token (parser); } else { /* des_seen is 0 if there have been no designators, 1 if there has been a single array designator and 2 otherwise. */ int des_seen = 0; /* Location of a designator. */ location_t des_loc = UNKNOWN_LOCATION; /* Quiet warning. */ while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DOT)) { int des_prev = des_seen; if (!des_seen) des_loc = c_parser_peek_token (parser)->location; if (des_seen < 2) des_seen++; if (c_parser_next_token_is (parser, CPP_DOT)) { des_seen = 2; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { set_init_label (des_loc, c_parser_peek_token (parser)->value, c_parser_peek_token (parser)->location, braced_init_obstack); c_parser_consume_token (parser); } else { struct c_expr init; init.set_error (); init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (input_location, init, false, braced_init_obstack); return; } } else { tree first, second; location_t ellipsis_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t array_index_loc = UNKNOWN_LOCATION; /* ??? Following the old parser, [ objc-receiver objc-message-args ] is accepted as an initializer, being distinguished from a designator by what follows the first assignment expression inside the square brackets, but after a first array designator a subsequent square bracket is for Objective-C taken to start an expression, using the obsolete form of designated initializer without '=', rather than possibly being a second level of designation: in LALR terms, the '[' is shifted rather than reducing designator to designator-list. */ if (des_prev == 1 && c_dialect_objc ()) { des_seen = des_prev; break; } if (des_prev == 0 && c_dialect_objc ()) { /* This might be an array designator or an Objective-C message expression. If the former, continue parsing here; if the latter, parse the remainder of the initializer given the starting primary-expression. ??? It might make sense to distinguish when des_prev == 1 as well; see previous comment. */ tree rec, args; struct c_expr mexpr; c_parser_consume_token (parser); if (c_parser_peek_token (parser)->type == CPP_NAME && ((c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { /* Type name receiver. */ tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); rec = objc_get_class_reference (id); goto parse_message_args; } first = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (first); if (c_parser_next_token_is (parser, CPP_ELLIPSIS) || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) goto array_desig_after_first; /* Expression receiver. So far only one part without commas has been parsed; there might be more of the expression. 
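For instance (a hypothetical example), '[x = next_receiver (), x describe]' parses 'x = next_receiver (), x' as a comma expression whose value is the receiver.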
*/ rec = first; while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t comma_loc, exp_loc; comma_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = convert_lvalue_to_rvalue (exp_loc, next, true, true); rec = build_compound_expr (comma_loc, rec, next.value); } parse_message_args: /* Now parse the objc-message-args. */ args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); mexpr.value = objc_build_message_expr (rec, args); mexpr.original_code = ERROR_MARK; mexpr.original_type = NULL; /* Now parse and process the remainder of the initializer, starting with this message expression as a primary-expression. */ c_parser_initval (parser, &mexpr, braced_init_obstack); return; } c_parser_consume_token (parser); array_index_loc = c_parser_peek_token (parser)->location; first = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (first); array_desig_after_first: if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); second = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (second); } else second = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { c_parser_consume_token (parser); set_init_index (array_index_loc, first, second, braced_init_obstack); if (second) pedwarn (ellipsis_loc, OPT_Wpedantic, "ISO C forbids specifying range of elements to initialize"); } else c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } } if (des_seen >= 1) { if (c_parser_next_token_is (parser, CPP_EQ)) { pedwarn_c90 (des_loc, OPT_Wpedantic, "ISO C90 forbids specifying subobject " "to initialize"); c_parser_consume_token (parser); } else { if (des_seen == 1) pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "obsolete use of designated initializer without %<=%>"); else { struct c_expr init; init.set_error (); init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected %<=%>"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (input_location, init, false, braced_init_obstack); return; } } } } c_parser_initval (parser, NULL, braced_init_obstack); } /* Parse a nested initializer; as c_parser_initializer but parses initializers within braced lists, after any designators have been applied. If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the initializer. */ static void c_parser_initval (c_parser *parser, struct c_expr *after, struct obstack * braced_init_obstack) { struct c_expr init; gcc_assert (!after || c_dialect_objc ()); location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init (parser, NULL_TREE, true, braced_init_obstack); else { init = c_parser_expr_no_commas (parser, after); if (init.value != NULL_TREE && TREE_CODE (init.value) != STRING_CST && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR) init = convert_lvalue_to_rvalue (loc, init, true, true); } process_init_element (loc, init, false, braced_init_obstack); } /* Parse a compound statement (possibly a function body) (C90 6.6.2, C99 6.8.2, C11 6.8.2). 
compound-statement: { block-item-list[opt] } { label-declarations block-item-list } block-item-list: block-item block-item-list block-item block-item: nested-declaration statement nested-declaration: declaration GNU extensions: compound-statement: { label-declarations block-item-list } nested-declaration: __extension__ nested-declaration nested-function-definition label-declarations: label-declaration label-declarations label-declaration label-declaration: __label__ identifier-list ; Allowing the mixing of declarations and code is new in C99. The GNU syntax also permits (not shown above) labels at the end of compound statements, which yield an error. We don't allow labels on declarations; this might seem like a natural extension, but there would be a conflict between attributes on the label and prefix attributes on the declaration. ??? The syntax follows the old parser in requiring something after label declarations. Although they are erroneous if the labels declared aren't defined, is it useful for the syntax to be this way? OpenACC: block-item: openacc-directive openacc-directive: update-directive OpenMP: block-item: openmp-directive openmp-directive: barrier-directive flush-directive taskwait-directive taskyield-directive cancel-directive cancellation-point-directive */ static tree c_parser_compound_statement (c_parser *parser) { tree stmt; location_t brace_loc; brace_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Ensure a scope is entered and left anyway to avoid confusion if we have just prepared to enter a function body. */ stmt = c_begin_compound_stmt (true); c_end_compound_stmt (brace_loc, stmt, true); return error_mark_node; } stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); return c_end_compound_stmt (brace_loc, stmt, true); } /* Parse a compound statement except for the opening brace. This is used for parsing both compound statements and statement expressions (which follow different paths to handling the opening). */ static void c_parser_compound_statement_nostart (c_parser *parser) { bool last_stmt = false; bool last_label = false; bool save_valid_for_pragma = valid_location_for_stdc_pragma_p (); location_t label_loc = UNKNOWN_LOCATION; /* Quiet warning. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { add_debug_begin_stmt (c_parser_peek_token (parser)->location); c_parser_consume_token (parser); return; } mark_valid_location_for_stdc_pragma (true); if (c_parser_next_token_is_keyword (parser, RID_LABEL)) { /* Read zero or more forward-declarations for labels that nested functions can jump to. */ mark_valid_location_for_stdc_pragma (false); while (c_parser_next_token_is_keyword (parser, RID_LABEL)) { label_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. 
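For example, '{ __label__ retry; retry: ...; goto retry; }' declares 'retry' local to the block so that nested functions defined inside it can jump to the label.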
*/ while (true) { tree label; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } label = declare_label (c_parser_peek_token (parser)->value); C_DECLARED_LABEL_FLAG (label) = 1; add_stmt (build_stmt (label_loc, DECL_EXPR, label)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } pedwarn (label_loc, OPT_Wpedantic, "ISO C forbids label declarations"); } /* We must now have at least one statement, label or declaration. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); c_parser_consume_token (parser); return; } while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { location_t loc = c_parser_peek_token (parser)->location; loc = expansion_point_location_if_in_system_header (loc); if (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { if (c_parser_next_token_is_keyword (parser, RID_CASE)) label_loc = c_parser_peek_2nd_token (parser)->location; else label_loc = c_parser_peek_token (parser)->location; last_label = true; last_stmt = false; mark_valid_location_for_stdc_pragma (false); c_parser_label (parser); } else if (!last_label && c_parser_next_tokens_start_declaration (parser)) { last_label = false; mark_valid_location_for_stdc_pragma (false); bool fallthru_attr_p = false; c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, vNULL, NULL, &fallthru_attr_p); if (last_stmt && !fallthru_attr_p) pedwarn_c90 (loc, OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = fallthru_attr_p; } else if (!last_label && c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also a unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declaration (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, vNULL); /* Following the old parser, __extension__ does not disable this diagnostic. */ restore_extension_diagnostics (ext); if (last_stmt) pedwarn_c90 (loc, OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else goto statement; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { /* External pragmas, and some omp pragmas, are not associated with regular C code, and so are not to be considered statements syntactically. This ensures that the user doesn't put them in places that would turn into syntax errors if the directive were ignored. */ if (c_parser_pragma (parser, last_label ?
pragma_stmt : pragma_compound, NULL)) last_label = false, last_stmt = true; } else if (c_parser_next_token_is (parser, CPP_EOF)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); return; } else if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { if (parser->in_if_block) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); error_at (loc, "expected %<}%> before %<else%>"); return; } else { error_at (loc, "%<else%> without a previous %<if%>"); c_parser_consume_token (parser); continue; } } else { statement: last_label = false; last_stmt = true; mark_valid_location_for_stdc_pragma (false); c_parser_statement_after_labels (parser, NULL); } parser->error = false; } if (last_label) error_at (label_loc, "label at end of compound statement"); c_parser_consume_token (parser); /* Restore the value we started with. */ mark_valid_location_for_stdc_pragma (save_valid_for_pragma); } /* Parse all consecutive labels. */ static void c_parser_all_labels (c_parser *parser) { while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); } /* Parse a label (C90 6.6.1, C99 6.8.1, C11 6.8.1). label: identifier : attributes[opt] case constant-expression : default : GNU extensions: label: case constant-expression ... constant-expression : The use of attributes on labels is a GNU extension. The syntax in GNU C accepts any expressions without commas, non-constant expressions being rejected later. */ static void c_parser_label (c_parser *parser) { location_t loc1 = c_parser_peek_token (parser)->location; tree label = NULL_TREE; /* Remember whether this case or a user-defined label is allowed to fall through to. */ bool fallthrough_p = c_parser_peek_token (parser)->flags & PREV_FALLTHROUGH; if (c_parser_next_token_is_keyword (parser, RID_CASE)) { tree exp1, exp2; c_parser_consume_token (parser); exp1 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); label = do_case (loc1, exp1, NULL_TREE); } else if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); exp2 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, exp1, exp2); } else c_parser_error (parser, "expected %<:%> or %<...%>"); } else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) { c_parser_consume_token (parser); if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, NULL_TREE, NULL_TREE); } else { tree name = c_parser_peek_token (parser)->value; tree tlab; tree attrs; location_t loc2 = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is (parser, CPP_NAME)); c_parser_consume_token (parser); gcc_assert (c_parser_next_token_is (parser, CPP_COLON)); c_parser_consume_token (parser); attrs = c_parser_attributes (parser); tlab = define_label (loc2, name); if (tlab) { decl_attributes (&tlab, attrs, 0); label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab)); } } if (label) { if (TREE_CODE (label) == LABEL_EXPR) FALLTHROUGH_LABEL_P (LABEL_EXPR_LABEL (label)) = fallthrough_p; else FALLTHROUGH_LABEL_P (CASE_LABEL (label)) = fallthrough_p; /* Allow '__attribute__((fallthrough));'. 
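For example, 'case 1: x++; __attribute__ ((fallthrough)); case 2: ...' marks the fall-through as intentional and silences -Wimplicit-fallthrough.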
*/ if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { location_t loc = c_parser_peek_token (parser)->location; tree attrs = c_parser_attributes (parser); if (attribute_fallthrough_p (attrs)) { if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree fn = build_call_expr_internal_loc (loc, IFN_FALLTHROUGH, void_type_node, 0); add_stmt (fn); } else warning_at (loc, OPT_Wattributes, "%<fallthrough%> attribute " "not followed by %<;%>"); } else if (attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>" " can be applied to a null statement"); } if (c_parser_next_tokens_start_declaration (parser)) { error_at (c_parser_peek_token (parser)->location, "a label can only be part of a statement and " "a declaration is not a statement"); c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false, /*static_assert_ok*/ true, /*empty_ok*/ true, /*nested*/ true, /*start_attr_ok*/ true, NULL, vNULL); } } } /* Parse a statement (C90 6.6, C99 6.8, C11 6.8). statement: labeled-statement compound-statement expression-statement selection-statement iteration-statement jump-statement labeled-statement: label statement expression-statement: expression[opt] ; selection-statement: if-statement switch-statement iteration-statement: while-statement do-statement for-statement jump-statement: goto identifier ; continue ; break ; return expression[opt] ; GNU extensions: statement: asm-statement jump-statement: goto * expression ; expression-statement: attributes ; Objective-C: statement: objc-throw-statement objc-try-catch-statement objc-synchronized-statement objc-throw-statement: @throw expression ; @throw ; OpenACC: statement: openacc-construct openacc-construct: parallel-construct kernels-construct data-construct loop-construct parallel-construct: parallel-directive structured-block kernels-construct: kernels-directive structured-block data-construct: data-directive structured-block loop-construct: loop-directive structured-block OpenMP: statement: openmp-construct openmp-construct: parallel-construct for-construct simd-construct for-simd-construct sections-construct single-construct parallel-for-construct parallel-for-simd-construct parallel-sections-construct master-construct critical-construct atomic-construct ordered-construct parallel-construct: parallel-directive structured-block for-construct: for-directive iteration-statement simd-construct: simd-directive iteration-statements for-simd-construct: for-simd-directive iteration-statements sections-construct: sections-directive section-scope single-construct: single-directive structured-block parallel-for-construct: parallel-for-directive iteration-statement parallel-for-simd-construct: parallel-for-simd-directive iteration-statement parallel-sections-construct: parallel-sections-directive section-scope master-construct: master-directive structured-block critical-construct: critical-directive structured-block atomic-construct: atomic-directive expression-statement ordered-construct: ordered-directive structured-block Transactional Memory: statement: transaction-statement transaction-cancel-statement IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. 
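For example, in 'if (a) if (b) f (); else g ();' the 'else' attaches to the inner 'if'; IF_P lets the outer 'if' suggest explicit braces.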
*/ static void c_parser_statement (c_parser *parser, bool *if_p, location_t *loc_after_labels) { c_parser_all_labels (parser); if (loc_after_labels) *loc_after_labels = c_parser_peek_token (parser)->location; c_parser_statement_after_labels (parser, if_p, NULL); } /* Parse a statement, other than a labeled statement. CHAIN is a vector of if-else-if conditions. IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static void c_parser_statement_after_labels (c_parser *parser, bool *if_p, vec<tree> *chain) { location_t loc = c_parser_peek_token (parser)->location; tree stmt = NULL_TREE; bool in_if_block = parser->in_if_block; parser->in_if_block = false; if (if_p != NULL) *if_p = false; if (c_parser_peek_token (parser)->type != CPP_OPEN_BRACE) add_debug_begin_stmt (loc); switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_BRACE: add_stmt (c_parser_compound_statement (parser)); break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_IF: c_parser_if_statement (parser, if_p, chain); break; case RID_SWITCH: c_parser_switch_statement (parser, if_p); break; case RID_WHILE: c_parser_while_statement (parser, false, 0, if_p); break; case RID_DO: c_parser_do_statement (parser, 0, false); break; case RID_FOR: c_parser_for_statement (parser, false, 0, if_p); break; case RID_GOTO: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { stmt = c_finish_goto_label (loc, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_MULT)) { struct c_expr val; c_parser_consume_token (parser); val = c_parser_expression (parser); val = convert_lvalue_to_rvalue (loc, val, false, true); stmt = c_finish_goto_ptr (loc, val.value); } else c_parser_error (parser, "expected identifier or %<*%>"); goto expect_semicolon; case RID_CONTINUE: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_cont_label, false); goto expect_semicolon; case RID_BREAK: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_break_label, true); goto expect_semicolon; case RID_RETURN: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = c_finish_return (loc, NULL_TREE, NULL_TREE); c_parser_consume_token (parser); } else { location_t xloc = c_parser_peek_token (parser)->location; struct c_expr expr = c_parser_expression_conv (parser); mark_exp_read (expr.value); stmt = c_finish_return (EXPR_LOC_OR_LOC (expr.value, xloc), expr.value, expr.original_type); goto expect_semicolon; } break; case RID_ASM: stmt = c_parser_asm_statement (parser); break; case RID_TRANSACTION_ATOMIC: case RID_TRANSACTION_RELAXED: stmt = c_parser_transaction (parser, c_parser_peek_token (parser)->keyword); break; case RID_TRANSACTION_CANCEL: stmt = c_parser_transaction_cancel (parser); goto expect_semicolon; case RID_AT_THROW: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = objc_build_throw_stmt (loc, NULL_TREE); c_parser_consume_token (parser); } else { struct c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (loc, expr, false, false); expr.value = c_fully_fold (expr.value, false, NULL); stmt = objc_build_throw_stmt (loc, expr.value); goto expect_semicolon; } break; case RID_AT_TRY: gcc_assert (c_dialect_objc ()); c_parser_objc_try_catch_finally_statement (parser); break; 
case RID_AT_SYNCHRONIZED: gcc_assert (c_dialect_objc ()); c_parser_objc_synchronized_statement (parser); break; case RID_ATTRIBUTE: { /* Allow '__attribute__((fallthrough));'. */ tree attrs = c_parser_attributes (parser); if (attribute_fallthrough_p (attrs)) { if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree fn = build_call_expr_internal_loc (loc, IFN_FALLTHROUGH, void_type_node, 0); add_stmt (fn); /* Eat the ';'. */ c_parser_consume_token (parser); } else warning_at (loc, OPT_Wattributes, "%<fallthrough%> attribute not followed " "by %<;%>"); } else if (attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>" " can be applied to a null statement"); break; } default: goto expr_stmt; } break; case CPP_SEMICOLON: c_parser_consume_token (parser); break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: /* Avoid infinite loop in error recovery: c_parser_skip_until_found stops at a closing nesting delimiter without consuming it, but here we need to consume it to proceed further. */ c_parser_error (parser, "expected statement"); c_parser_consume_token (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_stmt, if_p); break; default: expr_stmt: stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value); expect_semicolon: c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); break; } /* Two cases cannot and do not have line numbers associated: If stmt is degenerate, such as "2;", then stmt is an INTEGER_CST, which cannot hold line numbers. But that's OK because the statement will either be changed to a MODIFY_EXPR during gimplification of the statement expr, or discarded. If stmt was compound, but without new variables, we will have skipped the creation of a BIND and will have a bare STATEMENT_LIST. But that's OK because (recursively) all of the component statements should already have line numbers assigned. ??? Can we discard no-op statements earlier? */ if (stmt && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION) protected_set_expr_location (stmt, loc); parser->in_if_block = in_if_block; } /* Parse the condition from an if, do, while or for statement. */ static tree c_parser_condition (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree cond; cond = c_parser_expression_conv (parser).value; cond = c_objc_common_truthvalue_conversion (loc, cond); cond = c_fully_fold (cond, false, NULL); if (warn_sequence_point) verify_sequence_points (cond); return cond; } /* Parse a parenthesized condition from an if, do or while statement. condition: ( expression ) */ static tree c_parser_paren_condition (c_parser *parser) { tree cond; matching_parens parens; if (!parens.require_open (parser)) return error_mark_node; cond = c_parser_condition (parser); parens.skip_until_found_close (parser); return cond; } /* Parse a statement which is a block in C99. IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static tree c_parser_c99_block_statement (c_parser *parser, bool *if_p, location_t *loc_after_labels) { tree block = c_begin_compound_stmt (flag_isoc99); location_t loc = c_parser_peek_token (parser)->location; c_parser_statement (parser, if_p, loc_after_labels); return c_end_compound_stmt (loc, block, flag_isoc99); } /* Parse the body of an if statement.
This is just parsing a statement but (a) it is a block in C99, (b) we track whether the body is an if statement for the sake of -Wparentheses warnings, (c) we handle an empty body specially for the sake of -Wempty-body warnings, and (d) we call c_parser_compound_statement directly because c_parser_statement_after_labels resets parser->in_if_block. IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static tree c_parser_if_body (c_parser *parser, bool *if_p, const token_indent_info &if_tinfo) { tree block = c_begin_compound_stmt (flag_isoc99); location_t body_loc = c_parser_peek_token (parser)->location; location_t body_loc_after_labels = UNKNOWN_LOCATION; token_indent_info body_tinfo = get_token_indent_info (c_parser_peek_token (parser)); c_parser_all_labels (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); if (!c_parser_next_token_is_keyword (parser, RID_ELSE)) warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<if%> statement"); } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) add_stmt (c_parser_compound_statement (parser)); else { body_loc_after_labels = c_parser_peek_token (parser)->location; c_parser_statement_after_labels (parser, if_p); } token_indent_info next_tinfo = get_token_indent_info (c_parser_peek_token (parser)); warn_for_misleading_indentation (if_tinfo, body_tinfo, next_tinfo); if (body_loc_after_labels != UNKNOWN_LOCATION && next_tinfo.type != CPP_SEMICOLON) warn_for_multistatement_macros (body_loc_after_labels, next_tinfo.location, if_tinfo.location, RID_IF); return c_end_compound_stmt (body_loc, block, flag_isoc99); } /* Parse the else body of an if statement. This is just parsing a statement but (a) it is a block in C99, (b) we handle an empty body specially for the sake of -Wempty-body warnings. CHAIN is a vector of if-else-if conditions. */ static tree c_parser_else_body (c_parser *parser, const token_indent_info &else_tinfo, vec<tree> *chain) { location_t body_loc = c_parser_peek_token (parser)->location; tree block = c_begin_compound_stmt (flag_isoc99); token_indent_info body_tinfo = get_token_indent_info (c_parser_peek_token (parser)); location_t body_loc_after_labels = UNKNOWN_LOCATION; c_parser_all_labels (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<else%> statement"); add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); } else { if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE)) body_loc_after_labels = c_parser_peek_token (parser)->location; c_parser_statement_after_labels (parser, NULL, chain); } token_indent_info next_tinfo = get_token_indent_info (c_parser_peek_token (parser)); warn_for_misleading_indentation (else_tinfo, body_tinfo, next_tinfo); if (body_loc_after_labels != UNKNOWN_LOCATION && next_tinfo.type != CPP_SEMICOLON) warn_for_multistatement_macros (body_loc_after_labels, next_tinfo.location, else_tinfo.location, RID_ELSE); return c_end_compound_stmt (body_loc, block, flag_isoc99); } /* We might need to reclassify any previously-lexed identifier, e.g. when we've left a for loop with an if-statement without else in the body - we might have used a wrong scope for the token. See PR67784.
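Roughly: the token after the loop may have been peeked, and classified, while an inner scope was still open, so a name that is a typedef only in that scope could be mis-classified; the function below recomputes the classification in the current scope.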
*/ static void c_parser_maybe_reclassify_token (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *token = c_parser_peek_token (parser); if (token->id_kind != C_ID_CLASSNAME) { tree decl = lookup_name (token->value); token->id_kind = C_ID_ID; if (decl) { if (TREE_CODE (decl) == TYPE_DECL) token->id_kind = C_ID_TYPENAME; } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. */ if (objc_interface_decl) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; } } } } } /* Parse an if statement (C90 6.6.4, C99 6.8.4, C11 6.8.4). if-statement: if ( expression ) statement if ( expression ) statement else statement CHAIN is a vector of if-else-if conditions. IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static void c_parser_if_statement (c_parser *parser, bool *if_p, vec<tree> *chain) { tree block; location_t loc; tree cond; bool nested_if = false; tree first_body, second_body; bool in_if_block; gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF)); token_indent_info if_tinfo = get_token_indent_info (c_parser_peek_token (parser)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); in_if_block = parser->in_if_block; parser->in_if_block = true; first_body = c_parser_if_body (parser, &nested_if, if_tinfo); parser->in_if_block = in_if_block; if (warn_duplicated_cond) warn_duplicated_cond_add_or_warn (EXPR_LOCATION (cond), cond, &chain); if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { token_indent_info else_tinfo = get_token_indent_info (c_parser_peek_token (parser)); c_parser_consume_token (parser); if (warn_duplicated_cond) { if (c_parser_next_token_is_keyword (parser, RID_IF) && chain == NULL) { /* We've got "if (COND) else if (COND2)". Start the condition chain and add COND as the first element. */ chain = new vec<tree> (); if (!CONSTANT_CLASS_P (cond) && !TREE_SIDE_EFFECTS (cond)) chain->safe_push (cond); } else if (!c_parser_next_token_is_keyword (parser, RID_IF)) { /* This is if-else without subsequent if. Zap the condition chain; we would have already warned at this point. */ delete chain; chain = NULL; } } second_body = c_parser_else_body (parser, else_tinfo, chain); /* Set IF_P to true to indicate that this if statement has an else clause. This may trigger the Wparentheses warning below when we get back up to the parent if statement. */ if (if_p != NULL) *if_p = true; } else { second_body = NULL_TREE; /* Diagnose an ambiguous else if if-then-else is nested inside if-then. */ if (nested_if) warning_at (loc, OPT_Wdangling_else, "suggest explicit braces to avoid ambiguous %<else%>"); if (warn_duplicated_cond) { /* This if statement does not have an else clause. We don't need the condition chain anymore. */ delete chain; chain = NULL; } } c_finish_if_stmt (loc, cond, first_body, second_body); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_parser_maybe_reclassify_token (parser); } /* Parse a switch statement (C90 6.6.4, C99 6.8.4, C11 6.8.4). 
switch-statement: switch (expression) statement */ static void c_parser_switch_statement (c_parser *parser, bool *if_p) { struct c_expr ce; tree block, expr, body, save_break; location_t switch_loc = c_parser_peek_token (parser)->location; location_t switch_cond_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); bool explicit_cast_p = false; matching_parens parens; if (parens.require_open (parser)) { switch_cond_loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) explicit_cast_p = true; ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (switch_cond_loc, ce, true, false); expr = ce.value; /* ??? expr has no valid location? */ parens.skip_until_found_close (parser); } else { switch_cond_loc = UNKNOWN_LOCATION; expr = error_mark_node; ce.original_type = error_mark_node; } c_start_case (switch_loc, switch_cond_loc, expr, explicit_cast_p); save_break = c_break_label; c_break_label = NULL_TREE; location_t loc_after_labels; bool open_brace_p = c_parser_peek_token (parser)->type == CPP_OPEN_BRACE; body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels); location_t next_loc = c_parser_peek_token (parser)->location; if (!open_brace_p && c_parser_peek_token (parser)->type != CPP_SEMICOLON) warn_for_multistatement_macros (loc_after_labels, next_loc, switch_loc, RID_SWITCH); if (c_break_label) { location_t here = c_parser_peek_token (parser)->location; tree t = build1 (LABEL_EXPR, void_type_node, c_break_label); SET_EXPR_LOCATION (t, here); SWITCH_BREAK_LABEL_P (c_break_label) = 1; append_to_statement_list_force (t, &body); } c_finish_case (body, ce.original_type); c_break_label = save_break; add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99)); c_parser_maybe_reclassify_token (parser); } /* Parse a while statement (C90 6.6.5, C99 6.8.5, C11 6.8.5). while-statement: while (expression) statement IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. 
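The IVDEP and UNROLL arguments come from loop pragmas immediately preceding the statement, e.g. '#pragma GCC ivdep' or '#pragma GCC unroll 4'; both are attached to the condition as ANNOTATE_EXPRs below.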
*/ static void c_parser_while_statement (c_parser *parser, bool ivdep, unsigned short unroll, bool *if_p) { tree block, cond, body, save_break, save_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE)); token_indent_info while_tinfo = get_token_indent_info (c_parser_peek_token (parser)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); if (ivdep && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; token_indent_info body_tinfo = get_token_indent_info (c_parser_peek_token (parser)); location_t loc_after_labels; bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE); body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels); c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_parser_maybe_reclassify_token (parser); token_indent_info next_tinfo = get_token_indent_info (c_parser_peek_token (parser)); warn_for_misleading_indentation (while_tinfo, body_tinfo, next_tinfo); if (next_tinfo.type != CPP_SEMICOLON && !open_brace) warn_for_multistatement_macros (loc_after_labels, next_tinfo.location, while_tinfo.location, RID_WHILE); c_break_label = save_break; c_cont_label = save_cont; } /* Parse a do statement (C90 6.6.5, C99 6.8.5, C11 6.8.5). do-statement: do statement while ( expression ) ; */ static void c_parser_do_statement (c_parser *parser, bool ivdep, unsigned short unroll) { tree block, cond, body, save_break, save_cont, new_break, new_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) warning_at (c_parser_peek_token (parser)->location, OPT_Wempty_body, "suggest braces around empty body in %<do%> statement"); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser, NULL); c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>"); new_break = c_break_label; c_break_label = save_break; new_cont = c_cont_label; c_cont_label = save_cont; cond = c_parser_paren_condition (parser); if (ivdep && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); } /* Parse a for statement (C90 6.6.5, C99 6.8.5, C11 6.8.5). 
for-statement: for ( expression[opt] ; expression[opt] ; expression[opt] ) statement for ( nested-declaration expression[opt] ; expression[opt] ) statement The form with a declaration is new in C99. ??? In accordance with the old parser, the declaration may be a nested function, which is then rejected in check_for_loop_decls, but does it make any sense for this to be included in the grammar? Note in particular that the nested function does not include a trailing ';', whereas the "declaration" production includes one. Also, can we reject bad declarations earlier and cheaper than check_for_loop_decls? In Objective-C, there are two additional variants: foreach-statement: for ( expression in expression ) statement for ( declaration in expression ) statement This is inconsistent with C, because the second variant is allowed even if c99 is not enabled. The rest of the comment documents these Objective-C foreach-statements. Here is the canonical example of the first variant: for (object in array) { do something with object } we call the first expression ("object") the "object_expression" and the second expression ("array") the "collection_expression". object_expression must be an lvalue of type "id" (a generic Objective-C object) because the loop works by assigning to object_expression the various objects from the collection_expression. collection_expression must evaluate to something of type "id" which responds to the method countByEnumeratingWithState:objects:count:. The canonical example of the second variant is: for (id object in array) { do something with object } which is completely equivalent to { id object; for (object in array) { do something with object } } Note that initializing 'object' in some way (e.g., "for ((object = xxx) in array) { do something with object }") is possibly technically valid, but completely pointless as 'object' will be assigned to something else as soon as the loop starts. We should most likely reject it (TODO). The beginning of the Objective-C foreach-statement looks exactly like the beginning of the for-statement, and we can tell it is a foreach-statement only because the initial declaration or expression is terminated by 'in' instead of ';'. IF_P is used to track whether there's a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static void c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll, bool *if_p) { tree block, cond, incr, save_break, save_cont, body; /* The following are only used when parsing an ObjC foreach statement. */ tree object_expression; /* Silence the bogus uninitialized warning. */ tree collection_expression = NULL; location_t loc = c_parser_peek_token (parser)->location; location_t for_loc = c_parser_peek_token (parser)->location; bool is_foreach_statement = false; gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR)); token_indent_info for_tinfo = get_token_indent_info (c_parser_peek_token (parser)); c_parser_consume_token (parser); /* Open a compound statement in Objective-C as well, just in case this is a foreach expression. */ block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ()); cond = error_mark_node; incr = error_mark_node; matching_parens parens; if (parens.require_open (parser)) { /* Parse the initialization declaration or expression.
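This may be empty, as in 'for (; i < n;)', an expression, as in 'for (i = 0; ...)', or a declaration, as in 'for (int i = 0; ...)'; in Objective-C the declaration or expression may instead be terminated by 'in', making this a foreach statement.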
*/ object_expression = error_mark_node; parser->objc_could_be_foreach_context = c_dialect_objc (); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { parser->objc_could_be_foreach_context = false; c_parser_consume_token (parser); c_finish_expr_stmt (loc, NULL_TREE); } else if (c_parser_next_tokens_start_declaration (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true, true, &object_expression, vNULL); parser->objc_could_be_foreach_context = false; if (c_parser_next_token_is_keyword (parser, RID_IN)) { c_parser_consume_token (parser); is_foreach_statement = true; if (check_for_loop_decls (for_loc, true) == NULL_TREE) c_parser_error (parser, "multiple iterating variables in fast enumeration"); } else check_for_loop_decls (for_loc, flag_isoc99); } else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also a unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declaration (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_declaration_or_fndef (parser, true, true, true, true, true, &object_expression, vNULL); parser->objc_could_be_foreach_context = false; restore_extension_diagnostics (ext); if (c_parser_next_token_is_keyword (parser, RID_IN)) { c_parser_consume_token (parser); is_foreach_statement = true; if (check_for_loop_decls (for_loc, true) == NULL_TREE) c_parser_error (parser, "multiple iterating variables in fast enumeration"); } else check_for_loop_decls (for_loc, flag_isoc99); } else goto init_expr; } else { init_expr: { struct c_expr ce; tree init_expression; ce = c_parser_expression (parser); init_expression = ce.value; parser->objc_could_be_foreach_context = false; if (c_parser_next_token_is_keyword (parser, RID_IN)) { c_parser_consume_token (parser); is_foreach_statement = true; if (! lvalue_p (init_expression)) c_parser_error (parser, "invalid iterating variable in fast enumeration"); object_expression = c_fully_fold (init_expression, false, NULL); } else { ce = convert_lvalue_to_rvalue (loc, ce, true, false); init_expression = ce.value; c_finish_expr_stmt (loc, init_expression); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } } } /* Parse the loop condition. In the case of a foreach statement, there is no loop condition.
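Note that the ANNOTATE_EXPR wrappers built below hang off the condition tree, so a loop that omits its condition cannot carry a %<GCC ivdep%> or %<GCC unroll%> annotation and is diagnosed instead; a sketch:

      for (i = 0;; i++)        // accepted: cond becomes NULL_TREE
        g (i);

      #pragma GCC ivdep
      for (i = 0;; i++)        // error: missing loop condition
        g (i);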
*/ gcc_assert (!parser->objc_could_be_foreach_context); if (!is_foreach_statement) { if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { if (ivdep) { c_parser_error (parser, "missing loop condition in loop with " "%<GCC ivdep%> pragma"); cond = error_mark_node; } else if (unroll) { c_parser_error (parser, "missing loop condition in loop with " "%<GCC unroll%> pragma"); cond = error_mark_node; } else { c_parser_consume_token (parser); cond = NULL_TREE; } } else { cond = c_parser_condition (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } if (ivdep && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); } /* Parse the increment expression (the third expression in a for-statement). In the case of a foreach-statement, this is the expression that follows the 'in'. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { if (is_foreach_statement) { c_parser_error (parser, "missing collection in fast enumeration"); collection_expression = error_mark_node; } else incr = c_process_expr_stmt (loc, NULL_TREE); } else { if (is_foreach_statement) collection_expression = c_fully_fold (c_parser_expression (parser).value, false, NULL); else { struct c_expr ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, true, false); incr = c_process_expr_stmt (loc, ce.value); } } parens.skip_until_found_close (parser); } save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; token_indent_info body_tinfo = get_token_indent_info (c_parser_peek_token (parser)); location_t loc_after_labels; bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE); body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels); if (is_foreach_statement) objc_finish_foreach_loop (loc, object_expression, collection_expression, body, c_break_label, c_cont_label); else c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99 || c_dialect_objc ())); c_parser_maybe_reclassify_token (parser); token_indent_info next_tinfo = get_token_indent_info (c_parser_peek_token (parser)); warn_for_misleading_indentation (for_tinfo, body_tinfo, next_tinfo); if (next_tinfo.type != CPP_SEMICOLON && !open_brace) warn_for_multistatement_macros (loc_after_labels, next_tinfo.location, for_tinfo.location, RID_FOR); c_break_label = save_break; c_cont_label = save_cont; } /* Parse an asm statement, a GNU extension. This is a full-blown asm statement with inputs, outputs, clobbers, and volatile tag allowed. asm-statement: asm type-qualifier[opt] ( asm-argument ) ; asm type-qualifier[opt] goto ( asm-goto-argument ) ; asm-argument: asm-string-literal asm-string-literal : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt] asm-goto-argument: asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \ : asm-goto-operands Qualifiers other than volatile are accepted in the syntax but warned for. 
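For illustration, a fragment exercising the main forms above (the x86 AT&T templates, constraints and label are arbitrary examples; the parser itself does not interpret them):

      int in = 1, out;
      asm volatile ("mov %1, %0" : "=r" (out) : "r" (in));   // outputs : inputs
      asm ("" : : : "memory");                               // clobbers only
      asm goto ("jmp %l0" : : : : done);                     // labels in the fourth section
    done: ;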
*/ static tree c_parser_asm_statement (c_parser *parser) { tree quals, str, outputs, inputs, clobbers, labels, ret; bool simple, is_goto; location_t asm_loc = c_parser_peek_token (parser)->location; int section, nsections; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_VOLATILE)) { quals = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is_keyword (parser, RID_CONST) || c_parser_next_token_is_keyword (parser, RID_RESTRICT)) { warning_at (c_parser_peek_token (parser)->location, 0, "%E qualifier ignored on asm", c_parser_peek_token (parser)->value); quals = NULL_TREE; c_parser_consume_token (parser); } else quals = NULL_TREE; is_goto = false; if (c_parser_next_token_is_keyword (parser, RID_GOTO)) { c_parser_consume_token (parser); is_goto = true; } /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; ret = NULL; matching_parens parens; if (!parens.require_open (parser)) goto error; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) goto error_close_paren; simple = true; outputs = NULL_TREE; inputs = NULL_TREE; clobbers = NULL_TREE; labels = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; /* Parse each colon-delimited section of operands. */ nsections = 3 + is_goto; for (section = 0; section < nsections; ++section) { if (!c_parser_require (parser, CPP_COLON, is_goto ? G_("expected %<:%>") : G_("expected %<:%> or %<)%>"), UNKNOWN_LOCATION, is_goto)) goto error_close_paren; /* Once past any colon, we're no longer a simple asm. */ simple = false; if ((!c_parser_next_token_is (parser, CPP_COLON) && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) || section == 3) switch (section) { case 0: /* For asm goto, we don't allow output operands, but reserve the slot for a future extension that does allow them. */ if (!is_goto) outputs = c_parser_asm_operands (parser); break; case 1: inputs = c_parser_asm_operands (parser); break; case 2: clobbers = c_parser_asm_clobbers (parser); break; case 3: labels = c_parser_asm_goto_operands (parser); break; default: gcc_unreachable (); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; } done_asm: if (!parens.require_close (parser)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs, clobbers, labels, simple)); error: parser->lex_untranslated_string = false; return ret; error_close_paren: c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } /* Parse asm operands, a GNU extension. 
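An operand may be given a symbolic name in square brackets and referenced as %[name] in the template, e.g. (names and constraints arbitrary):

      asm ("add %[src], %[dst]" : [dst] "+r" (x) : [src] "r" (y));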
asm-operands: asm-operand asm-operands , asm-operand asm-operand: asm-string-literal ( expression ) [ identifier ] asm-string-literal ( expression ) */ static tree c_parser_asm_operands (c_parser *parser) { tree list = NULL_TREE; while (true) { tree name, str; struct c_expr expr; if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); name = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id)); } else { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return NULL_TREE; } c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } else name = NULL_TREE; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) return NULL_TREE; parser->lex_untranslated_string = false; matching_parens parens; if (!parens.require_open (parser)) { parser->lex_untranslated_string = true; return NULL_TREE; } expr = c_parser_expression (parser); mark_exp_read (expr.value); parser->lex_untranslated_string = true; if (!parens.require_close (parser)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } list = chainon (list, build_tree_list (build_tree_list (name, str), expr.value)); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm clobbers, a GNU extension. asm-clobbers: asm-string-literal asm-clobbers , asm-string-literal */ static tree c_parser_asm_clobbers (c_parser *parser) { tree list = NULL_TREE; while (true) { tree str = c_parser_asm_string_literal (parser); if (str) list = tree_cons (NULL_TREE, str, list); else return NULL_TREE; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm goto labels, a GNU extension. asm-goto-operands: identifier asm-goto-operands , identifier */ static tree c_parser_asm_goto_operands (c_parser *parser) { tree list = NULL_TREE; while (true) { tree name, label; if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *tok = c_parser_peek_token (parser); name = tok->value; label = lookup_label_for_goto (tok->location, name); c_parser_consume_token (parser); TREE_USED (label) = 1; } else { c_parser_error (parser, "expected identifier"); return NULL_TREE; } name = build_string (IDENTIFIER_LENGTH (name), IDENTIFIER_POINTER (name)); list = tree_cons (name, label, list); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else return nreverse (list); } } /* Parse an expression other than a compound expression; that is, an assignment expression (C90 6.3.16, C99 6.5.16, C11 6.5.16). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. assignment-expression: conditional-expression unary-expression assignment-operator assignment-expression assignment-operator: one of = *= /= %= += -= <<= >>= &= ^= |= In GNU C we accept any conditional expression on the LHS and diagnose the invalid lvalue rather than producing a syntax error. 
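For instance, each compound operator selects the matching tree code for build_modify_expr, and a conditional expression on the LHS parses but is diagnosed afterwards (a sketch):

      x += 2;            // CPP_PLUS_EQ selects PLUS_EXPR
      (c ? x : y) = 1;   // parses here; later diagnosed as an invalid lvalue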
*/ static struct c_expr c_parser_expr_no_commas (c_parser *parser, struct c_expr *after, tree omp_atomic_lhs) { struct c_expr lhs, rhs, ret; enum tree_code code; location_t op_location, exp_location; gcc_assert (!after || c_dialect_objc ()); lhs = c_parser_conditional_expression (parser, after, omp_atomic_lhs); op_location = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_EQ: code = NOP_EXPR; break; case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_MOD_EQ: code = TRUNC_MOD_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; default: return lhs; } c_parser_consume_token (parser); exp_location = c_parser_peek_token (parser)->location; rhs = c_parser_expr_no_commas (parser, NULL); rhs = convert_lvalue_to_rvalue (exp_location, rhs, true, true); ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type, code, exp_location, rhs.value, rhs.original_type); set_c_expr_source_range (&ret, lhs.get_start (), rhs.get_finish ()); if (code == NOP_EXPR) ret.original_code = MODIFY_EXPR; else { TREE_NO_WARNING (ret.value) = 1; ret.original_code = ERROR_MARK; } ret.original_type = NULL; return ret; } /* Parse a conditional expression (C90 6.3.15, C99 6.5.15, C11 6.5.15). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. conditional-expression: logical-OR-expression logical-OR-expression ? expression : conditional-expression GNU extensions: conditional-expression: logical-OR-expression ? : conditional-expression */ static struct c_expr c_parser_conditional_expression (c_parser *parser, struct c_expr *after, tree omp_atomic_lhs) { struct c_expr cond, exp1, exp2, ret; location_t start, cond_loc, colon_loc; gcc_assert (!after || c_dialect_objc ()); cond = c_parser_binary_expression (parser, after, omp_atomic_lhs); if (c_parser_next_token_is_not (parser, CPP_QUERY)) return cond; if (cond.value != error_mark_node) start = cond.get_start (); else start = UNKNOWN_LOCATION; cond_loc = c_parser_peek_token (parser)->location; cond = convert_lvalue_to_rvalue (cond_loc, cond, true, true); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COLON)) { tree eptype = NULL_TREE; location_t middle_loc = c_parser_peek_token (parser)->location; pedwarn (middle_loc, OPT_Wpedantic, "ISO C forbids omitting the middle term of a ?: expression"); if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE (cond.value); cond.value = TREE_OPERAND (cond.value, 0); } tree e = cond.value; while (TREE_CODE (e) == COMPOUND_EXPR) e = TREE_OPERAND (e, 1); warn_for_omitted_condop (middle_loc, e); /* Make sure first operand is calculated only once. 
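For example, in the GNU form "r = f () ? : -1;" the call f () serves as both the test and the result and must be evaluated exactly once; the save_expr below guarantees that (f is an arbitrary example).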
*/ exp1.value = save_expr (default_conversion (cond.value)); if (eptype) exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value); exp1.original_type = NULL; exp1.src_range = cond.src_range; cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value); c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node; } else { cond.value = c_objc_common_truthvalue_conversion (cond_loc, default_conversion (cond.value)); c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node; exp1 = c_parser_expression_conv (parser); mark_exp_read (exp1.value); c_inhibit_evaluation_warnings += ((cond.value == truthvalue_true_node) - (cond.value == truthvalue_false_node)); } colon_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) { c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } { location_t exp2_loc = c_parser_peek_token (parser)->location; exp2 = c_parser_conditional_expression (parser, NULL, NULL_TREE); exp2 = convert_lvalue_to_rvalue (exp2_loc, exp2, true, true); } c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; location_t loc1 = make_location (exp1.get_start (), exp1.src_range); location_t loc2 = make_location (exp2.get_start (), exp2.src_range); ret.value = build_conditional_expr (colon_loc, cond.value, cond.original_code == C_MAYBE_CONST_EXPR, exp1.value, exp1.original_type, loc1, exp2.value, exp2.original_type, loc2); ret.original_code = ERROR_MARK; if (exp1.value == error_mark_node || exp2.value == error_mark_node) ret.original_type = NULL; else { tree t1, t2; /* If both sides are enum type, the default conversion will have made the type of the result be an integer type. We want to remember the enum types we started with. */ t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value); t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value); ret.original_type = ((t1 != error_mark_node && t2 != error_mark_node && (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))) ? t1 : NULL); } set_c_expr_source_range (&ret, start, exp2.get_finish ()); return ret; } /* Parse a binary expression; that is, a logical-OR-expression (C90 6.3.5-6.3.14, C99 6.5.5-6.5.14, C11 6.5.5-6.5.14). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. OMP_ATOMIC_LHS is NULL, unless parsing OpenMP #pragma omp atomic, when it should be the unfolded lhs. In a valid OpenMP source, one of the operands of the toplevel binary expression must be equal to it. In that case, just return a build2 created binary operation rather than result of parser_build_binary_op. 
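For example, given

      #pragma omp atomic
      x = x + y;

the top-level PLUS_EXPR has the atomic lhs x as one operand, so it is rebuilt with build2 instead of parser_build_binary_op.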
multiplicative-expression: cast-expression multiplicative-expression * cast-expression multiplicative-expression / cast-expression multiplicative-expression % cast-expression additive-expression: multiplicative-expression additive-expression + multiplicative-expression additive-expression - multiplicative-expression shift-expression: additive-expression shift-expression << additive-expression shift-expression >> additive-expression relational-expression: shift-expression relational-expression < shift-expression relational-expression > shift-expression relational-expression <= shift-expression relational-expression >= shift-expression equality-expression: relational-expression equality-expression == relational-expression equality-expression != relational-expression AND-expression: equality-expression AND-expression & equality-expression exclusive-OR-expression: AND-expression exclusive-OR-expression ^ AND-expression inclusive-OR-expression: exclusive-OR-expression inclusive-OR-expression | exclusive-OR-expression logical-AND-expression: inclusive-OR-expression logical-AND-expression && inclusive-OR-expression logical-OR-expression: logical-AND-expression logical-OR-expression || logical-AND-expression */ static struct c_expr c_parser_binary_expression (c_parser *parser, struct c_expr *after, tree omp_atomic_lhs) { /* A binary expression is parsed using operator-precedence parsing, with the operands being cast expressions. All the binary operators are left-associative. Thus a binary expression is of form: E0 op1 E1 op2 E2 ... which we represent on a stack. On the stack, the precedence levels are strictly increasing. When a new operator is encountered of higher precedence than that at the top of the stack, it is pushed; its LHS is the top expression, and its RHS is everything parsed until it is popped. When a new operator is encountered with precedence less than or equal to that at the top of the stack, triples E[i-1] op[i] E[i] are popped and replaced by the result of the operation until the operator at the top of the stack has lower precedence than the new operator or there is only one element on the stack; then the top expression is the LHS of the new operator. In the case of logical AND and OR expressions, we also need to adjust c_inhibit_evaluation_warnings as appropriate when the operators are pushed and popped. */ struct { /* The expression at this stack level. */ struct c_expr expr; /* The precedence of the operator on its left, PREC_NONE at the bottom of the stack. */ enum c_parser_prec prec; /* The operation on its left. */ enum tree_code op; /* The source location of this operation. */ location_t loc; /* The sizeof argument if expr.original_code == SIZEOF_EXPR. */ tree sizeof_arg; } stack[NUM_PRECS]; int sp; /* Location of the binary operator. */ location_t binary_loc = UNKNOWN_LOCATION; /* Quiet warning. 
*/ #define POP \ do { \ switch (stack[sp].op) \ { \ case TRUTH_ANDIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_false_node); \ break; \ case TRUTH_ORIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_true_node); \ break; \ case TRUNC_DIV_EXPR: \ if (stack[sp - 1].expr.original_code == SIZEOF_EXPR \ && stack[sp].expr.original_code == SIZEOF_EXPR) \ { \ tree type0 = stack[sp - 1].sizeof_arg; \ tree type1 = stack[sp].sizeof_arg; \ tree first_arg = type0; \ if (!TYPE_P (type0)) \ type0 = TREE_TYPE (type0); \ if (!TYPE_P (type1)) \ type1 = TREE_TYPE (type1); \ if (POINTER_TYPE_P (type0) \ && comptypes (TREE_TYPE (type0), type1) \ && !(TREE_CODE (first_arg) == PARM_DECL \ && C_ARRAY_PARAMETER (first_arg) \ && warn_sizeof_array_argument)) \ if (warning_at (stack[sp].loc, OPT_Wsizeof_pointer_div, \ "division %<sizeof (%T) / sizeof (%T)%> does " \ "not compute the number of array elements", \ type0, type1)) \ if (DECL_P (first_arg)) \ inform (DECL_SOURCE_LOCATION (first_arg), \ "first %<sizeof%> operand was declared here"); \ } \ break; \ default: \ break; \ } \ stack[sp - 1].expr \ = convert_lvalue_to_rvalue (stack[sp - 1].loc, \ stack[sp - 1].expr, true, true); \ stack[sp].expr \ = convert_lvalue_to_rvalue (stack[sp].loc, \ stack[sp].expr, true, true); \ if (__builtin_expect (omp_atomic_lhs != NULL_TREE, 0) && sp == 1 \ && c_parser_peek_token (parser)->type == CPP_SEMICOLON \ && ((1 << stack[sp].prec) \ & ((1 << PREC_BITOR) | (1 << PREC_BITXOR) | (1 << PREC_BITAND) \ | (1 << PREC_SHIFT) | (1 << PREC_ADD) | (1 << PREC_MULT))) \ && stack[sp].op != TRUNC_MOD_EXPR \ && stack[0].expr.value != error_mark_node \ && stack[1].expr.value != error_mark_node \ && (c_tree_equal (stack[0].expr.value, omp_atomic_lhs) \ || c_tree_equal (stack[1].expr.value, omp_atomic_lhs))) \ stack[0].expr.value \ = build2 (stack[1].op, TREE_TYPE (stack[0].expr.value), \ stack[0].expr.value, stack[1].expr.value); \ else \ stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \ stack[sp].op, \ stack[sp - 1].expr, \ stack[sp].expr); \ sp--; \ } while (0) gcc_assert (!after || c_dialect_objc ()); stack[0].loc = c_parser_peek_token (parser)->location; stack[0].expr = c_parser_cast_expression (parser, after); stack[0].prec = PREC_NONE; stack[0].sizeof_arg = c_last_sizeof_arg; sp = 0; while (true) { enum c_parser_prec oprec; enum tree_code ocode; source_range src_range; if (parser->error) goto out; switch (c_parser_peek_token (parser)->type) { case CPP_MULT: oprec = PREC_MULT; ocode = MULT_EXPR; break; case CPP_DIV: oprec = PREC_MULT; ocode = TRUNC_DIV_EXPR; break; case CPP_MOD: oprec = PREC_MULT; ocode = TRUNC_MOD_EXPR; break; case CPP_PLUS: oprec = PREC_ADD; ocode = PLUS_EXPR; break; case CPP_MINUS: oprec = PREC_ADD; ocode = MINUS_EXPR; break; case CPP_LSHIFT: oprec = PREC_SHIFT; ocode = LSHIFT_EXPR; break; case CPP_RSHIFT: oprec = PREC_SHIFT; ocode = RSHIFT_EXPR; break; case CPP_LESS: oprec = PREC_REL; ocode = LT_EXPR; break; case CPP_GREATER: oprec = PREC_REL; ocode = GT_EXPR; break; case CPP_LESS_EQ: oprec = PREC_REL; ocode = LE_EXPR; break; case CPP_GREATER_EQ: oprec = PREC_REL; ocode = GE_EXPR; break; case CPP_EQ_EQ: oprec = PREC_EQ; ocode = EQ_EXPR; break; case CPP_NOT_EQ: oprec = PREC_EQ; ocode = NE_EXPR; break; case CPP_AND: oprec = PREC_BITAND; ocode = BIT_AND_EXPR; break; case CPP_XOR: oprec = PREC_BITXOR; ocode = BIT_XOR_EXPR; break; case CPP_OR: oprec = PREC_BITOR; ocode = BIT_IOR_EXPR; break; case CPP_AND_AND: oprec = PREC_LOGAND; ocode = 
TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: oprec = PREC_LOGOR; ocode = TRUTH_ORIF_EXPR; break; default: /* Not a binary operator, so end of the binary expression. */ goto out; } binary_loc = c_parser_peek_token (parser)->location; while (oprec <= stack[sp].prec) POP; c_parser_consume_token (parser); switch (ocode) { case TRUTH_ANDIF_EXPR: src_range = stack[sp].expr.src_range; stack[sp].expr = convert_lvalue_to_rvalue (stack[sp].loc, stack[sp].expr, true, true); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_false_node); set_c_expr_source_range (&stack[sp].expr, src_range); break; case TRUTH_ORIF_EXPR: src_range = stack[sp].expr.src_range; stack[sp].expr = convert_lvalue_to_rvalue (stack[sp].loc, stack[sp].expr, true, true); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_true_node); set_c_expr_source_range (&stack[sp].expr, src_range); break; default: break; } sp++; stack[sp].loc = binary_loc; stack[sp].expr = c_parser_cast_expression (parser, NULL); stack[sp].prec = oprec; stack[sp].op = ocode; stack[sp].sizeof_arg = c_last_sizeof_arg; } out: while (sp > 0) POP; return stack[0].expr; #undef POP } /* Parse a cast expression (C90 6.3.4, C99 6.5.4, C11 6.5.4). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. cast-expression: unary-expression ( type-name ) unary-expression */ static struct c_expr c_parser_cast_expression (c_parser *parser, struct c_expr *after) { location_t cast_loc = c_parser_peek_token (parser)->location; gcc_assert (!after || c_dialect_objc ()); if (after) return c_parser_postfix_expression_after_primary (parser, cast_loc, *after); /* If the expression begins with a parenthesized type name, it may be either a cast or a compound literal; we need to see whether the next character is '{' to tell the difference. If not, it is a unary expression. Full detection of unknown typenames here would require a 3-token lookahead. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { struct c_type_name *type_name; struct c_expr ret; struct c_expr expr; matching_parens parens; parens.consume_open (parser); type_name = c_parser_type_name (parser, true); parens.skip_until_found_close (parser); if (type_name == NULL) { ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } /* Save casted types in the function's used types hash table.
*/ used_types_insert (type_name->specs->type); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_postfix_expression_after_paren_type (parser, type_name, cast_loc); if (type_name->specs->alignas_p) error_at (type_name->specs->locations[cdw_alignas], "alignment specified for type name in cast"); { location_t expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_cast_expression (parser, NULL); expr = convert_lvalue_to_rvalue (expr_loc, expr, true, true); } ret.value = c_cast_expr (cast_loc, type_name, expr.value); if (ret.value && expr.value) set_c_expr_source_range (&ret, cast_loc, expr.get_finish ()); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else return c_parser_unary_expression (parser); } /* Parse a unary expression (C90 6.3.3, C99 6.5.3, C11 6.5.3). unary-expression: postfix-expression ++ unary-expression -- unary-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-name ) unary-operator: one of & * + - ~ ! GNU extensions: unary-expression: __alignof__ unary-expression __alignof__ ( type-name ) && identifier (C11 permits _Alignof with type names only.) unary-operator: one of __extension__ __real__ __imag__ Transactional Memory: unary-expression: transaction-expression In addition, the GNU syntax treats ++ and -- as unary operators, so they may be applied to cast expressions with errors for non-lvalues given later. */ static struct c_expr c_parser_unary_expression (c_parser *parser) { int ext; struct c_expr ret, op; location_t op_loc = c_parser_peek_token (parser)->location; location_t exp_loc; location_t finish; ret.original_code = ERROR_MARK; ret.original_type = NULL; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS_PLUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_read_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREINCREMENT_EXPR, op); case CPP_MINUS_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_read_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op); case CPP_AND: c_parser_consume_token (parser); op = c_parser_cast_expression (parser, NULL); mark_exp_read (op.value); return parser_build_unary_op (op_loc, ADDR_EXPR, op); case CPP_MULT: { c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); finish = op.get_finish (); op = convert_lvalue_to_rvalue (exp_loc, op, true, true); location_t combined_loc = make_location (op_loc, op_loc, finish); ret.value = build_indirect_ref (combined_loc, op.value, RO_UNARY_STAR); ret.src_range.m_start = op_loc; ret.src_range.m_finish = finish; return ret; } case CPP_PLUS: if (!c_dialect_objc () && !in_system_header_at (input_location)) warning_at (op_loc, OPT_Wtraditional, "traditional C rejects the unary plus operator"); c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = convert_lvalue_to_rvalue (exp_loc, op, true, true); return parser_build_unary_op (op_loc, CONVERT_EXPR, op); case CPP_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = convert_lvalue_to_rvalue (exp_loc, op, true, true); return parser_build_unary_op (op_loc,
NEGATE_EXPR, op); case CPP_COMPL: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = convert_lvalue_to_rvalue (exp_loc, op, true, true); return parser_build_unary_op (op_loc, BIT_NOT_EXPR, op); case CPP_NOT: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = convert_lvalue_to_rvalue (exp_loc, op, true, true); return parser_build_unary_op (op_loc, TRUTH_NOT_EXPR, op); case CPP_AND_AND: /* Refer to the address of a label as a pointer. */ c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { ret.value = finish_label_address_expr (c_parser_peek_token (parser)->value, op_loc); set_c_expr_source_range (&ret, op_loc, c_parser_peek_token (parser)->get_finish ()); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected identifier"); ret.set_error (); } return ret; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_SIZEOF: return c_parser_sizeof_expression (parser); case RID_ALIGNOF: return c_parser_alignof_expression (parser); case RID_EXTENSION: c_parser_consume_token (parser); ext = disable_extension_diagnostics (); ret = c_parser_cast_expression (parser, NULL); restore_extension_diagnostics (ext); return ret; case RID_REALPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, REALPART_EXPR, op); case RID_IMAGPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, IMAGPART_EXPR, op); case RID_TRANSACTION_ATOMIC: case RID_TRANSACTION_RELAXED: return c_parser_transaction_expression (parser, c_parser_peek_token (parser)->keyword); default: return c_parser_postfix_expression (parser); } default: return c_parser_postfix_expression (parser); } } /* Parse a sizeof expression. */ static struct c_expr c_parser_sizeof_expression (c_parser *parser) { struct c_expr expr; struct c_expr result; location_t expr_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF)); location_t start; location_t finish = UNKNOWN_LOCATION; start = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_sizeof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either sizeof ( type-name ) or sizeof unary-expression starting with a compound literal. */ struct c_type_name *type_name; matching_parens parens; parens.consume_open (parser); expr_loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser, true); parens.skip_until_found_close (parser); finish = parser->tokens_buf[0].location; if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_sizeof--; ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, expr_loc); finish = expr.get_finish (); goto sizeof_expr; } /* sizeof ( type-name ). 
*/ if (type_name->specs->alignas_p) error_at (type_name->specs->locations[cdw_alignas], "alignment specified for type name in %<sizeof%>"); c_inhibit_evaluation_warnings--; in_sizeof--; result = c_expr_sizeof_type (expr_loc, type_name); } else { expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_unary_expression (parser); finish = expr.get_finish (); sizeof_expr: c_inhibit_evaluation_warnings--; in_sizeof--; mark_exp_read (expr.value); if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (expr_loc, "%<sizeof%> applied to a bit-field"); result = c_expr_sizeof_expr (expr_loc, expr); } if (finish != UNKNOWN_LOCATION) set_c_expr_source_range (&result, start, finish); return result; } /* Parse an alignof expression. */ static struct c_expr c_parser_alignof_expression (c_parser *parser) { struct c_expr expr; location_t start_loc = c_parser_peek_token (parser)->location; location_t end_loc; tree alignof_spelling = c_parser_peek_token (parser)->value; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF)); bool is_c11_alignof = strcmp (IDENTIFIER_POINTER (alignof_spelling), "_Alignof") == 0; /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spellings. */ if (is_c11_alignof) { if (flag_isoc99) pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C99 does not support %qE", alignof_spelling); else pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C90 does not support %qE", alignof_spelling); } c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_alignof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either __alignof__ ( type-name ) or __alignof__ unary-expression starting with a compound literal. */ location_t loc; struct c_type_name *type_name; struct c_expr ret; matching_parens parens; parens.consume_open (parser); loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser, true); end_loc = c_parser_peek_token (parser)->location; parens.skip_until_found_close (parser); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_alignof--; ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, loc); goto alignof_expr; } /* alignof ( type-name ). 
*/ if (type_name->specs->alignas_p) error_at (type_name->specs->locations[cdw_alignas], "alignment specified for type name in %qE", alignof_spelling); c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_sizeof_or_alignof_type (loc, groktypename (type_name, NULL, NULL), false, is_c11_alignof, 1); ret.original_code = ERROR_MARK; ret.original_type = NULL; set_c_expr_source_range (&ret, start_loc, end_loc); return ret; } else { struct c_expr ret; expr = c_parser_unary_expression (parser); end_loc = expr.src_range.m_finish; alignof_expr: mark_exp_read (expr.value); c_inhibit_evaluation_warnings--; in_alignof--; if (is_c11_alignof) pedwarn (start_loc, OPT_Wpedantic, "ISO C does not allow %<%E (expression)%>", alignof_spelling); ret.value = c_alignof_expr (start_loc, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; set_c_expr_source_range (&ret, start_loc, end_loc); return ret; } } /* Helper function to read arguments of builtins which are interfaces for the middle-end nodes like COMPLEX_EXPR, VEC_PERM_EXPR and others. The name of the builtin is passed using BNAME parameter. Function returns true if there were no errors while parsing and stores the arguments in CEXPR_LIST. If it returns true, *OUT_CLOSE_PAREN_LOC is written to with the location of the closing parenthesis. */ static bool c_parser_get_builtin_args (c_parser *parser, const char *bname, vec<c_expr_t, va_gc> **ret_cexpr_list, bool choose_expr_p, location_t *out_close_paren_loc) { location_t loc = c_parser_peek_token (parser)->location; vec<c_expr_t, va_gc> *cexpr_list; c_expr_t expr; bool saved_force_folding_builtin_constant_p; *ret_cexpr_list = NULL; if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { error_at (loc, "cannot take address of %qs", bname); return false; } c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { *out_close_paren_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); return true; } saved_force_folding_builtin_constant_p = force_folding_builtin_constant_p; force_folding_builtin_constant_p |= choose_expr_p; expr = c_parser_expr_no_commas (parser, NULL); force_folding_builtin_constant_p = saved_force_folding_builtin_constant_p; vec_alloc (cexpr_list, 1); vec_safe_push (cexpr_list, expr); while (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); expr = c_parser_expr_no_commas (parser, NULL); vec_safe_push (cexpr_list, expr); } *out_close_paren_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) return false; *ret_cexpr_list = cexpr_list; return true; } /* This represents a single generic-association. */ struct c_generic_association { /* The location of the starting token of the type. */ location_t type_location; /* The association's type, or NULL_TREE for 'default'. */ tree type; /* The association's expression. */ struct c_expr expression; }; /* Parse a generic-selection. (C11 6.5.1.1). 
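A canonical use is a <tgmath.h>-style dispatch macro, adapted from the example in C11 6.5.1.1 (macro name arbitrary):

      #define my_cbrt(x) _Generic ((x), long double: cbrtl, default: cbrt, float: cbrtf) (x)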
generic-selection: _Generic ( assignment-expression , generic-assoc-list ) generic-assoc-list: generic-association generic-assoc-list , generic-association generic-association: type-name : assignment-expression default : assignment-expression */ static struct c_expr c_parser_generic_selection (c_parser *parser) { struct c_expr selector, error_expr; tree selector_type; struct c_generic_association matched_assoc; bool match_found = false; location_t generic_loc, selector_loc; error_expr.original_code = ERROR_MARK; error_expr.original_type = NULL; error_expr.set_error (); matched_assoc.type_location = UNKNOWN_LOCATION; matched_assoc.type = NULL_TREE; matched_assoc.expression = error_expr; gcc_assert (c_parser_next_token_is_keyword (parser, RID_GENERIC)); generic_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (flag_isoc99) pedwarn_c99 (generic_loc, OPT_Wpedantic, "ISO C99 does not support %<_Generic%>"); else pedwarn_c99 (generic_loc, OPT_Wpedantic, "ISO C90 does not support %<_Generic%>"); matching_parens parens; if (!parens.require_open (parser)) return error_expr; c_inhibit_evaluation_warnings++; selector_loc = c_parser_peek_token (parser)->location; selector = c_parser_expr_no_commas (parser, NULL); selector = default_function_array_conversion (selector_loc, selector); c_inhibit_evaluation_warnings--; if (selector.value == error_mark_node) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return selector; } selector_type = TREE_TYPE (selector.value); /* In ISO C terms, rvalues (including the controlling expression of _Generic) do not have qualified types. */ if (TREE_CODE (selector_type) != ARRAY_TYPE) selector_type = TYPE_MAIN_VARIANT (selector_type); /* In ISO C terms, _Noreturn is not part of the type of expressions such as &abort, but in GCC it is represented internally as a type qualifier. 
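This stripping is why, for example, "_Generic (&abort, void (*) (void): 1)" selects its association even though the type of &abort carries the internal noreturn qualifier.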
*/ if (FUNCTION_POINTER_TYPE_P (selector_type) && TYPE_QUALS (TREE_TYPE (selector_type)) != TYPE_UNQUALIFIED) selector_type = build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (selector_type))); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } auto_vec<c_generic_association> associations; while (1) { struct c_generic_association assoc, *iter; unsigned int ix; c_token *token = c_parser_peek_token (parser); assoc.type_location = token->location; if (token->type == CPP_KEYWORD && token->keyword == RID_DEFAULT) { c_parser_consume_token (parser); assoc.type = NULL_TREE; } else { struct c_type_name *type_name; type_name = c_parser_type_name (parser); if (type_name == NULL) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } assoc.type = groktypename (type_name, NULL, NULL); if (assoc.type == error_mark_node) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } if (TREE_CODE (assoc.type) == FUNCTION_TYPE) error_at (assoc.type_location, "%<_Generic%> association has function type"); else if (!COMPLETE_TYPE_P (assoc.type)) error_at (assoc.type_location, "%<_Generic%> association has incomplete type"); if (variably_modified_type_p (assoc.type, NULL_TREE)) error_at (assoc.type_location, "%<_Generic%> association has " "variable length type"); } if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } assoc.expression = c_parser_expr_no_commas (parser, NULL); if (assoc.expression.value == error_mark_node) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } for (ix = 0; associations.iterate (ix, &iter); ++ix) { if (assoc.type == NULL_TREE) { if (iter->type == NULL_TREE) { error_at (assoc.type_location, "duplicate %<default%> case in %<_Generic%>"); inform (iter->type_location, "original %<default%> is here"); } } else if (iter->type != NULL_TREE) { if (comptypes (assoc.type, iter->type)) { error_at (assoc.type_location, "%<_Generic%> specifies two compatible types"); inform (iter->type_location, "compatible type is here"); } } } if (assoc.type == NULL_TREE) { if (!match_found) { matched_assoc = assoc; match_found = true; } } else if (comptypes (assoc.type, selector_type)) { if (!match_found || matched_assoc.type == NULL_TREE) { matched_assoc = assoc; match_found = true; } else { error_at (assoc.type_location, "%<_Generic%> selector matches multiple associations"); inform (matched_assoc.type_location, "other match is here"); } } associations.safe_push (assoc); if (c_parser_peek_token (parser)->type != CPP_COMMA) break; c_parser_consume_token (parser); } if (!parens.require_close (parser)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return error_expr; } if (!match_found) { error_at (selector_loc, "%<_Generic%> selector of type %qT is not " "compatible with any association", selector_type); return error_expr; } return matched_assoc.expression; } /* Check the validity of a function pointer argument *EXPR (argument position POS) to __builtin_tgmath. Return the number of function arguments if possibly valid; return 0 having reported an error if not valid. 
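For example, in the usage pattern from the GCC manual (function choice arbitrary)

      #define my_sin(x) __builtin_tgmath (sinf, sin, sinl, x)

the first three arguments are the function pointers vetted here and the returned argument count is 1.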
*/ static unsigned int check_tgmath_function (c_expr *expr, unsigned int pos) { tree type = TREE_TYPE (expr->value); if (!FUNCTION_POINTER_TYPE_P (type)) { error_at (expr->get_location (), "argument %u of %<__builtin_tgmath%> is not a function pointer", pos); return 0; } type = TREE_TYPE (type); if (!prototype_p (type)) { error_at (expr->get_location (), "argument %u of %<__builtin_tgmath%> is unprototyped", pos); return 0; } if (stdarg_p (type)) { error_at (expr->get_location (), "argument %u of %<__builtin_tgmath%> has variable arguments", pos); return 0; } unsigned int nargs = 0; function_args_iterator iter; tree t; FOREACH_FUNCTION_ARGS (type, t, iter) { if (t == void_type_node) break; nargs++; } if (nargs == 0) { error_at (expr->get_location (), "argument %u of %<__builtin_tgmath%> has no arguments", pos); return 0; } return nargs; } /* Ways in which a parameter or return value of a type-generic macro may vary between the different functions the macro may call. */ enum tgmath_parm_kind { tgmath_fixed, tgmath_real, tgmath_complex }; /* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2, C11 6.5.1-6.5.2). Compound literals aren't handled here; callers have to call c_parser_postfix_expression_after_paren_type on encountering them. postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( argument-expression-list[opt] ) postfix-expression . identifier postfix-expression -> identifier postfix-expression ++ postfix-expression -- ( type-name ) { initializer-list } ( type-name ) { initializer-list , } argument-expression-list: argument-expression argument-expression-list , argument-expression primary-expression: identifier constant string-literal ( expression ) generic-selection GNU extensions: primary-expression: __func__ (treated as a keyword in GNU C) __FUNCTION__ __PRETTY_FUNCTION__ ( compound-statement ) __builtin_va_arg ( assignment-expression , type-name ) __builtin_offsetof ( type-name , offsetof-member-designator ) __builtin_choose_expr ( assignment-expression , assignment-expression , assignment-expression ) __builtin_types_compatible_p ( type-name , type-name ) __builtin_tgmath ( expr-list ) __builtin_complex ( assignment-expression , assignment-expression ) __builtin_shuffle ( assignment-expression , assignment-expression ) __builtin_shuffle ( assignment-expression , assignment-expression , assignment-expression, ) offsetof-member-designator: identifier offsetof-member-designator . identifier offsetof-member-designator [ expression ] Objective-C: primary-expression: [ objc-receiver objc-message-args ] @selector ( objc-selector-arg ) @protocol ( identifier ) @encode ( type-name ) objc-string-literal Classname . 
identifier */ static struct c_expr c_parser_postfix_expression (c_parser *parser) { struct c_expr expr, e1; struct c_type_name *t1, *t2; location_t loc = c_parser_peek_token (parser)->location; source_range tok_range = c_parser_peek_token (parser)->get_range (); expr.original_code = ERROR_MARK; expr.original_type = NULL; switch (c_parser_peek_token (parser)->type) { case CPP_NUMBER: expr.value = c_parser_peek_token (parser)->value; set_c_expr_source_range (&expr, tok_range); loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (TREE_CODE (expr.value) == FIXED_CST && !targetm.fixed_point_supported_p ()) { error_at (loc, "fixed-point types not supported for this target"); expr.set_error (); } break; case CPP_CHAR: case CPP_CHAR16: case CPP_CHAR32: case CPP_WCHAR: expr.value = c_parser_peek_token (parser)->value; /* For the purpose of warning when a pointer is compared with a zero character constant. */ expr.original_type = char_type_node; set_c_expr_source_range (&expr, tok_range); c_parser_consume_token (parser); break; case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: expr.value = c_parser_peek_token (parser)->value; set_c_expr_source_range (&expr, tok_range); expr.original_code = STRING_CST; c_parser_consume_token (parser); break; case CPP_OBJC_STRING: gcc_assert (c_dialect_objc ()); expr.value = objc_build_string_object (c_parser_peek_token (parser)->value); set_c_expr_source_range (&expr, tok_range); c_parser_consume_token (parser); break; case CPP_NAME: switch (c_parser_peek_token (parser)->id_kind) { case C_ID_ID: { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); expr.value = build_external_ref (loc, id, (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN), &expr.original_type); set_c_expr_source_range (&expr, tok_range); break; } case C_ID_CLASSNAME: { /* Here we parse the Objective-C 2.0 Class.name dot syntax. */ tree class_name = c_parser_peek_token (parser)->value; tree component; c_parser_consume_token (parser); gcc_assert (c_dialect_objc ()); if (!c_parser_require (parser, CPP_DOT, "expected %<.%>")) { expr.set_error (); break; } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); expr.set_error (); break; } c_token *component_tok = c_parser_peek_token (parser); component = component_tok->value; location_t end_loc = component_tok->get_finish (); c_parser_consume_token (parser); expr.value = objc_build_class_component_ref (class_name, component); set_c_expr_source_range (&expr, loc, end_loc); break; } default: c_parser_error (parser, "expected expression"); expr.set_error (); break; } break; case CPP_OPEN_PAREN: /* A parenthesized expression, statement expression or compound literal. */ if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE) { /* A statement expression. 
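For example "({ int t = f (); t * 2; })", a GNU extension whose value is that of its last statement; at this point the next two tokens are known to be ( and {.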
*/ tree stmt; location_t brace_loc; c_parser_consume_token (parser); brace_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (!building_stmt_list_p ()) { error_at (loc, "braced-group within expression allowed " "only inside a function"); parser->error = true; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.set_error (); break; } stmt = c_begin_stmt_expr (); c_parser_compound_statement_nostart (parser); location_t close_loc = c_parser_peek_token (parser)->location; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pedwarn (loc, OPT_Wpedantic, "ISO C forbids braced-groups within expressions"); expr.value = c_finish_stmt_expr (brace_loc, stmt); set_c_expr_source_range (&expr, loc, close_loc); mark_exp_read (expr.value); } else { /* A parenthesized expression. */ location_t loc_open_paren = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); expr = c_parser_expression (parser); if (TREE_CODE (expr.value) == MODIFY_EXPR) TREE_NO_WARNING (expr.value) = 1; if (expr.original_code != C_MAYBE_CONST_EXPR && expr.original_code != SIZEOF_EXPR) expr.original_code = ERROR_MARK; /* Don't change EXPR.ORIGINAL_TYPE. */ location_t loc_close_paren = c_parser_peek_token (parser)->location; set_c_expr_source_range (&expr, loc_open_paren, loc_close_paren); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>", loc_open_paren); } break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_FUNCTION_NAME: pedwarn (loc, OPT_Wpedantic, "ISO C does not support " "%<__FUNCTION__%> predefined identifier"); expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); set_c_expr_source_range (&expr, loc, loc); c_parser_consume_token (parser); break; case RID_PRETTY_FUNCTION_NAME: pedwarn (loc, OPT_Wpedantic, "ISO C does not support " "%<__PRETTY_FUNCTION__%> predefined identifier"); expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); set_c_expr_source_range (&expr, loc, loc); c_parser_consume_token (parser); break; case RID_C99_FUNCTION_NAME: pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support " "%<__func__%> predefined identifier"); expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); set_c_expr_source_range (&expr, loc, loc); c_parser_consume_token (parser); break; case RID_VA_ARG: { location_t start_loc = loc; c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } e1 = c_parser_expr_no_commas (parser, NULL); mark_exp_read (e1.value); e1.value = c_fully_fold (e1.value, false, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.set_error (); break; } loc = c_parser_peek_token (parser)->location; t1 = c_parser_type_name (parser); location_t end_loc = c_parser_peek_token (parser)->get_finish (); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t1 == NULL) { expr.set_error (); } else { tree type_expr = NULL_TREE; expr.value = c_build_va_arg (start_loc, e1.value, loc, groktypename (t1, &type_expr, NULL)); if (type_expr) { expr.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr.value), type_expr, expr.value); C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true; } set_c_expr_source_range (&expr, start_loc, 
end_loc); } } break; case RID_OFFSETOF: { c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } t1 = c_parser_type_name (parser); if (t1 == NULL) parser->error = true; if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) gcc_assert (parser->error); if (parser->error) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.set_error (); break; } tree type = groktypename (t1, NULL, NULL); tree offsetof_ref; if (type == error_mark_node) offsetof_ref = error_mark_node; else { offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node); SET_EXPR_LOCATION (offsetof_ref, loc); } /* Parse the second argument to __builtin_offsetof. We must have one identifier, and beyond that we want to accept sub structure and sub array references. */ if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *comp_tok = c_parser_peek_token (parser); offsetof_ref = build_component_ref (loc, offsetof_ref, comp_tok->value, comp_tok->location); c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_DOT) || c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DEREF)) { if (c_parser_next_token_is (parser, CPP_DEREF)) { loc = c_parser_peek_token (parser)->location; offsetof_ref = build_array_ref (loc, offsetof_ref, integer_zero_node); goto do_dot; } else if (c_parser_next_token_is (parser, CPP_DOT)) { do_dot: c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } c_token *comp_tok = c_parser_peek_token (parser); offsetof_ref = build_component_ref (loc, offsetof_ref, comp_tok->value, comp_tok->location); c_parser_consume_token (parser); } else { struct c_expr ce; tree idx; loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, false, false); idx = ce.value; idx = c_fully_fold (idx, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); offsetof_ref = build_array_ref (loc, offsetof_ref, idx); } } } else c_parser_error (parser, "expected identifier"); location_t end_loc = c_parser_peek_token (parser)->get_finish (); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = fold_offsetof (offsetof_ref); set_c_expr_source_range (&expr, loc, end_loc); } break; case RID_CHOOSE_EXPR: { vec<c_expr_t, va_gc> *cexpr_list; c_expr_t *e1_p, *e2_p, *e3_p; tree c; location_t close_paren_loc; c_parser_consume_token (parser); if (!c_parser_get_builtin_args (parser, "__builtin_choose_expr", &cexpr_list, true, &close_paren_loc)) { expr.set_error (); break; } if (vec_safe_length (cexpr_list) != 3) { error_at (loc, "wrong number of arguments to " "%<__builtin_choose_expr%>"); expr.set_error (); break; } e1_p = &(*cexpr_list)[0]; e2_p = &(*cexpr_list)[1]; e3_p = &(*cexpr_list)[2]; c = e1_p->value; mark_exp_read (e2_p->value); mark_exp_read (e3_p->value); if (TREE_CODE (c) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (c))) error_at (loc, "first argument to %<__builtin_choose_expr%> not" " a constant"); constant_expression_warning (c); expr = integer_zerop (c) ? 
*e3_p : *e2_p; set_c_expr_source_range (&expr, loc, close_paren_loc); break; } case RID_TYPES_COMPATIBLE_P: { c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.set_error (); break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.set_error (); break; } t2 = c_parser_type_name (parser); if (t2 == NULL) { expr.set_error (); break; } location_t close_paren_loc = c_parser_peek_token (parser)->location; parens.skip_until_found_close (parser); tree e1, e2; e1 = groktypename (t1, NULL, NULL); e2 = groktypename (t2, NULL, NULL); if (e1 == error_mark_node || e2 == error_mark_node) { expr.set_error (); break; } e1 = TYPE_MAIN_VARIANT (e1); e2 = TYPE_MAIN_VARIANT (e2); expr.value = comptypes (e1, e2) ? integer_one_node : integer_zero_node; set_c_expr_source_range (&expr, loc, close_paren_loc); } break; case RID_BUILTIN_TGMATH: { vec<c_expr_t, va_gc> *cexpr_list; location_t close_paren_loc; c_parser_consume_token (parser); if (!c_parser_get_builtin_args (parser, "__builtin_tgmath", &cexpr_list, false, &close_paren_loc)) { expr.set_error (); break; } if (vec_safe_length (cexpr_list) < 3) { error_at (loc, "too few arguments to %<__builtin_tgmath%>"); expr.set_error (); break; } unsigned int i; c_expr_t *p; FOR_EACH_VEC_ELT (*cexpr_list, i, p) *p = convert_lvalue_to_rvalue (loc, *p, true, true); unsigned int nargs = check_tgmath_function (&(*cexpr_list)[0], 1); if (nargs == 0) { expr.set_error (); break; } if (vec_safe_length (cexpr_list) < nargs) { error_at (loc, "too few arguments to %<__builtin_tgmath%>"); expr.set_error (); break; } unsigned int num_functions = vec_safe_length (cexpr_list) - nargs; if (num_functions < 2) { error_at (loc, "too few arguments to %<__builtin_tgmath%>"); expr.set_error (); break; } /* The first NUM_FUNCTIONS expressions are the function pointers. The remaining NARGS expressions are the arguments that are to be passed to one of those functions, chosen following <tgmath.h> rules. */ for (unsigned int j = 1; j < num_functions; j++) { unsigned int this_nargs = check_tgmath_function (&(*cexpr_list)[j], j + 1); if (this_nargs == 0) { expr.set_error (); goto out; } if (this_nargs != nargs) { error_at ((*cexpr_list)[j].get_location (), "argument %u of %<__builtin_tgmath%> has " "wrong number of arguments", j + 1); expr.set_error (); goto out; } } /* The functions all have the same number of arguments. Determine whether arguments and return types vary in ways permitted for <tgmath.h> functions. */ /* The first entry in each of these vectors is for the return type, subsequent entries for parameter types. 
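   As a concrete illustration (a hypothetical macro, not something
   defined in this file), a <tgmath.h> implementation might use
     #define cbrt(X) __builtin_tgmath (cbrtf, cbrt, cbrtl, X)
   in which case NUM_FUNCTIONS is 3 and NARGS is 1, so each vector
   below holds two entries: the return type and one parameter type.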
*/ auto_vec<enum tgmath_parm_kind> parm_kind (nargs + 1); auto_vec<tree> parm_first (nargs + 1); auto_vec<bool> parm_complex (nargs + 1); auto_vec<bool> parm_varies (nargs + 1); tree first_type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[0].value)); tree first_ret = TYPE_MAIN_VARIANT (TREE_TYPE (first_type)); parm_first.quick_push (first_ret); parm_complex.quick_push (TREE_CODE (first_ret) == COMPLEX_TYPE); parm_varies.quick_push (false); function_args_iterator iter; tree t; unsigned int argpos; FOREACH_FUNCTION_ARGS (first_type, t, iter) { if (t == void_type_node) break; parm_first.quick_push (TYPE_MAIN_VARIANT (t)); parm_complex.quick_push (TREE_CODE (t) == COMPLEX_TYPE); parm_varies.quick_push (false); } for (unsigned int j = 1; j < num_functions; j++) { tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value)); tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type)); if (ret != parm_first[0]) { parm_varies[0] = true; if (!SCALAR_FLOAT_TYPE_P (parm_first[0]) && !COMPLEX_FLOAT_TYPE_P (parm_first[0])) { error_at ((*cexpr_list)[0].get_location (), "invalid type-generic return type for " "argument %u of %<__builtin_tgmath%>", 1); expr.set_error (); goto out; } if (!SCALAR_FLOAT_TYPE_P (ret) && !COMPLEX_FLOAT_TYPE_P (ret)) { error_at ((*cexpr_list)[j].get_location (), "invalid type-generic return type for " "argument %u of %<__builtin_tgmath%>", j + 1); expr.set_error (); goto out; } } if (TREE_CODE (ret) == COMPLEX_TYPE) parm_complex[0] = true; argpos = 1; FOREACH_FUNCTION_ARGS (type, t, iter) { if (t == void_type_node) break; t = TYPE_MAIN_VARIANT (t); if (t != parm_first[argpos]) { parm_varies[argpos] = true; if (!SCALAR_FLOAT_TYPE_P (parm_first[argpos]) && !COMPLEX_FLOAT_TYPE_P (parm_first[argpos])) { error_at ((*cexpr_list)[0].get_location (), "invalid type-generic type for " "argument %u of argument %u of " "%<__builtin_tgmath%>", argpos, 1); expr.set_error (); goto out; } if (!SCALAR_FLOAT_TYPE_P (t) && !COMPLEX_FLOAT_TYPE_P (t)) { error_at ((*cexpr_list)[j].get_location (), "invalid type-generic type for " "argument %u of argument %u of " "%<__builtin_tgmath%>", argpos, j + 1); expr.set_error (); goto out; } } if (TREE_CODE (t) == COMPLEX_TYPE) parm_complex[argpos] = true; argpos++; } } enum tgmath_parm_kind max_variation = tgmath_fixed; for (unsigned int j = 0; j <= nargs; j++) { enum tgmath_parm_kind this_kind; if (parm_varies[j]) { if (parm_complex[j]) max_variation = this_kind = tgmath_complex; else { this_kind = tgmath_real; if (max_variation != tgmath_complex) max_variation = tgmath_real; } } else this_kind = tgmath_fixed; parm_kind.quick_push (this_kind); } if (max_variation == tgmath_fixed) { error_at (loc, "function arguments of %<__builtin_tgmath%> " "all have the same type"); expr.set_error (); break; } /* Identify a parameter (not the return type) that varies, including with complex types if any variation includes complex types; there must be at least one such parameter. */ unsigned int tgarg = 0; for (unsigned int j = 1; j <= nargs; j++) if (parm_kind[j] == max_variation) { tgarg = j; break; } if (tgarg == 0) { error_at (loc, "function arguments of %<__builtin_tgmath%> " "lack type-generic parameter"); expr.set_error (); break; } /* Determine the type of the relevant parameter for each function. 
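   In the hypothetical cbrt example above, TGARG is 1 and the types
   collected below are float, double and long double.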
*/ auto_vec<tree> tg_type (num_functions); for (unsigned int j = 0; j < num_functions; j++) { tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value)); argpos = 1; FOREACH_FUNCTION_ARGS (type, t, iter) { if (argpos == tgarg) { tg_type.quick_push (TYPE_MAIN_VARIANT (t)); break; } argpos++; } } /* Verify that the corresponding types are different for all the listed functions. Also determine whether all the types are complex, whether all the types are standard or binary, and whether all the types are decimal. */ bool all_complex = true; bool all_binary = true; bool all_decimal = true; hash_set<tree> tg_types; FOR_EACH_VEC_ELT (tg_type, i, t) { if (TREE_CODE (t) == COMPLEX_TYPE) all_decimal = false; else { all_complex = false; if (DECIMAL_FLOAT_TYPE_P (t)) all_binary = false; else all_decimal = false; } if (tg_types.add (t)) { error_at ((*cexpr_list)[i].get_location (), "duplicate type-generic parameter type for " "function argument %u of %<__builtin_tgmath%>", i + 1); expr.set_error (); goto out; } } /* Verify that other parameters and the return type whose types vary have their types varying in the correct way. */ for (unsigned int j = 0; j < num_functions; j++) { tree exp_type = tg_type[j]; tree exp_real_type = exp_type; if (TREE_CODE (exp_type) == COMPLEX_TYPE) exp_real_type = TREE_TYPE (exp_type); tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value)); tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type)); if ((parm_kind[0] == tgmath_complex && ret != exp_type) || (parm_kind[0] == tgmath_real && ret != exp_real_type)) { error_at ((*cexpr_list)[j].get_location (), "bad return type for function argument %u " "of %<__builtin_tgmath%>", j + 1); expr.set_error (); goto out; } argpos = 1; FOREACH_FUNCTION_ARGS (type, t, iter) { if (t == void_type_node) break; t = TYPE_MAIN_VARIANT (t); if ((parm_kind[argpos] == tgmath_complex && t != exp_type) || (parm_kind[argpos] == tgmath_real && t != exp_real_type)) { error_at ((*cexpr_list)[j].get_location (), "bad type for argument %u of " "function argument %u of " "%<__builtin_tgmath%>", argpos, j + 1); expr.set_error (); goto out; } argpos++; } } /* The functions listed are a valid set of functions for a <tgmath.h> macro to select between. Identify the matching function, if any. First, the argument types must be combined following <tgmath.h> rules. Integer types are treated as _Decimal64 if any type-generic argument is decimal, or if the only alternatives for type-generic arguments are of decimal types, and are otherwise treated as double (or _Complex double for complex integer types, or _Float64 or _Complex _Float64 if all the return types are the same _FloatN or _FloatNx type). After that adjustment, types are combined following the usual arithmetic conversions. If the function only accepts complex arguments, a complex type is produced. 
*/ bool arg_complex = all_complex; bool arg_binary = all_binary; bool arg_int_decimal = all_decimal; for (unsigned int j = 1; j <= nargs; j++) { if (parm_kind[j] == tgmath_fixed) continue; c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1]; tree type = TREE_TYPE (ce->value); if (!INTEGRAL_TYPE_P (type) && !SCALAR_FLOAT_TYPE_P (type) && TREE_CODE (type) != COMPLEX_TYPE) { error_at (ce->get_location (), "invalid type of argument %u of type-generic " "function", j); expr.set_error (); goto out; } if (DECIMAL_FLOAT_TYPE_P (type)) { arg_int_decimal = true; if (all_complex) { error_at (ce->get_location (), "decimal floating-point argument %u to " "complex-only type-generic function", j); expr.set_error (); goto out; } else if (all_binary) { error_at (ce->get_location (), "decimal floating-point argument %u to " "binary-only type-generic function", j); expr.set_error (); goto out; } else if (arg_complex) { error_at (ce->get_location (), "both complex and decimal floating-point " "arguments to type-generic function"); expr.set_error (); goto out; } else if (arg_binary) { error_at (ce->get_location (), "both binary and decimal floating-point " "arguments to type-generic function"); expr.set_error (); goto out; } } else if (TREE_CODE (type) == COMPLEX_TYPE) { arg_complex = true; if (COMPLEX_FLOAT_TYPE_P (type)) arg_binary = true; if (all_decimal) { error_at (ce->get_location (), "complex argument %u to " "decimal-only type-generic function", j); expr.set_error (); goto out; } else if (arg_int_decimal) { error_at (ce->get_location (), "both complex and decimal floating-point " "arguments to type-generic function"); expr.set_error (); goto out; } } else if (SCALAR_FLOAT_TYPE_P (type)) { arg_binary = true; if (all_decimal) { error_at (ce->get_location (), "binary argument %u to " "decimal-only type-generic function", j); expr.set_error (); goto out; } else if (arg_int_decimal) { error_at (ce->get_location (), "both binary and decimal floating-point " "arguments to type-generic function"); expr.set_error (); goto out; } } } /* For a macro rounding its result to a narrower type, map integer types to _Float64 not double if the return type is a _FloatN or _FloatNx type. */ bool arg_int_float64 = false; if (parm_kind[0] == tgmath_fixed && SCALAR_FLOAT_TYPE_P (parm_first[0]) && float64_type_node != NULL_TREE) for (unsigned int j = 0; j < NUM_FLOATN_NX_TYPES; j++) if (parm_first[0] == FLOATN_TYPE_NODE (j)) { arg_int_float64 = true; break; } tree arg_real = NULL_TREE; for (unsigned int j = 1; j <= nargs; j++) { if (parm_kind[j] == tgmath_fixed) continue; c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1]; tree type = TYPE_MAIN_VARIANT (TREE_TYPE (ce->value)); if (TREE_CODE (type) == COMPLEX_TYPE) type = TREE_TYPE (type); if (INTEGRAL_TYPE_P (type)) type = (arg_int_decimal ? dfloat64_type_node : arg_int_float64 ? float64_type_node : double_type_node); if (arg_real == NULL_TREE) arg_real = type; else arg_real = common_type (arg_real, type); if (arg_real == error_mark_node) { expr.set_error (); goto out; } } tree arg_type = (arg_complex ? build_complex_type (arg_real) : arg_real); /* Look for a function to call with type-generic parameter type ARG_TYPE. 
*/ c_expr_t *fn = NULL; for (unsigned int j = 0; j < num_functions; j++) { if (tg_type[j] == arg_type) { fn = &(*cexpr_list)[j]; break; } } if (fn == NULL && parm_kind[0] == tgmath_fixed && SCALAR_FLOAT_TYPE_P (parm_first[0])) { /* Presume this is a macro that rounds its result to a narrower type, and look for the first function with at least the range and precision of the argument type. */ for (unsigned int j = 0; j < num_functions; j++) { if (arg_complex != (TREE_CODE (tg_type[j]) == COMPLEX_TYPE)) continue; tree real_tg_type = (arg_complex ? TREE_TYPE (tg_type[j]) : tg_type[j]); if (DECIMAL_FLOAT_TYPE_P (arg_real) != DECIMAL_FLOAT_TYPE_P (real_tg_type)) continue; scalar_float_mode arg_mode = SCALAR_FLOAT_TYPE_MODE (arg_real); scalar_float_mode tg_mode = SCALAR_FLOAT_TYPE_MODE (real_tg_type); const real_format *arg_fmt = REAL_MODE_FORMAT (arg_mode); const real_format *tg_fmt = REAL_MODE_FORMAT (tg_mode); if (arg_fmt->b == tg_fmt->b && arg_fmt->p <= tg_fmt->p && arg_fmt->emax <= tg_fmt->emax && (arg_fmt->emin - arg_fmt->p >= tg_fmt->emin - tg_fmt->p)) { fn = &(*cexpr_list)[j]; break; } } } if (fn == NULL) { error_at (loc, "no matching function for type-generic call"); expr.set_error (); break; } /* Construct a call to FN. */ vec<tree, va_gc> *args; vec_alloc (args, nargs); vec<tree, va_gc> *origtypes; vec_alloc (origtypes, nargs); auto_vec<location_t> arg_loc (nargs); for (unsigned int j = 0; j < nargs; j++) { c_expr_t *ce = &(*cexpr_list)[num_functions + j]; args->quick_push (ce->value); arg_loc.quick_push (ce->get_location ()); origtypes->quick_push (ce->original_type); } expr.value = c_build_function_call_vec (loc, arg_loc, fn->value, args, origtypes); set_c_expr_source_range (&expr, loc, close_paren_loc); break; } case RID_BUILTIN_CALL_WITH_STATIC_CHAIN: { vec<c_expr_t, va_gc> *cexpr_list; c_expr_t *e2_p; tree chain_value; location_t close_paren_loc; c_parser_consume_token (parser); if (!c_parser_get_builtin_args (parser, "__builtin_call_with_static_chain", &cexpr_list, false, &close_paren_loc)) { expr.set_error (); break; } if (vec_safe_length (cexpr_list) != 2) { error_at (loc, "wrong number of arguments to " "%<__builtin_call_with_static_chain%>"); expr.set_error (); break; } expr = (*cexpr_list)[0]; e2_p = &(*cexpr_list)[1]; *e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true); chain_value = e2_p->value; mark_exp_read (chain_value); if (TREE_CODE (expr.value) != CALL_EXPR) error_at (loc, "first argument to " "%<__builtin_call_with_static_chain%> " "must be a call expression"); else if (TREE_CODE (TREE_TYPE (chain_value)) != POINTER_TYPE) error_at (loc, "second argument to " "%<__builtin_call_with_static_chain%> " "must be a pointer type"); else CALL_EXPR_STATIC_CHAIN (expr.value) = chain_value; set_c_expr_source_range (&expr, loc, close_paren_loc); break; } case RID_BUILTIN_COMPLEX: { vec<c_expr_t, va_gc> *cexpr_list; c_expr_t *e1_p, *e2_p; location_t close_paren_loc; c_parser_consume_token (parser); if (!c_parser_get_builtin_args (parser, "__builtin_complex", &cexpr_list, false, &close_paren_loc)) { expr.set_error (); break; } if (vec_safe_length (cexpr_list) != 2) { error_at (loc, "wrong number of arguments to " "%<__builtin_complex%>"); expr.set_error (); break; } e1_p = &(*cexpr_list)[0]; e2_p = &(*cexpr_list)[1]; *e1_p = convert_lvalue_to_rvalue (loc, *e1_p, true, true); if (TREE_CODE (e1_p->value) == EXCESS_PRECISION_EXPR) e1_p->value = convert (TREE_TYPE (e1_p->value), TREE_OPERAND (e1_p->value, 0)); *e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true); if 
(TREE_CODE (e2_p->value) == EXCESS_PRECISION_EXPR) e2_p->value = convert (TREE_TYPE (e2_p->value), TREE_OPERAND (e2_p->value, 0)); if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (e1_p->value)) || DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e1_p->value)) || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (e2_p->value)) || DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e2_p->value))) { error_at (loc, "%<__builtin_complex%> operand " "not of real binary floating-point type"); expr.set_error (); break; } if (TYPE_MAIN_VARIANT (TREE_TYPE (e1_p->value)) != TYPE_MAIN_VARIANT (TREE_TYPE (e2_p->value))) { error_at (loc, "%<__builtin_complex%> operands of different types"); expr.set_error (); break; } pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); expr.value = build2_loc (loc, COMPLEX_EXPR, build_complex_type (TYPE_MAIN_VARIANT (TREE_TYPE (e1_p->value))), e1_p->value, e2_p->value); set_c_expr_source_range (&expr, loc, close_paren_loc); break; } case RID_BUILTIN_SHUFFLE: { vec<c_expr_t, va_gc> *cexpr_list; unsigned int i; c_expr_t *p; location_t close_paren_loc; c_parser_consume_token (parser); if (!c_parser_get_builtin_args (parser, "__builtin_shuffle", &cexpr_list, false, &close_paren_loc)) { expr.set_error (); break; } FOR_EACH_VEC_SAFE_ELT (cexpr_list, i, p) *p = convert_lvalue_to_rvalue (loc, *p, true, true); if (vec_safe_length (cexpr_list) == 2) expr.value = c_build_vec_perm_expr (loc, (*cexpr_list)[0].value, NULL_TREE, (*cexpr_list)[1].value); else if (vec_safe_length (cexpr_list) == 3) expr.value = c_build_vec_perm_expr (loc, (*cexpr_list)[0].value, (*cexpr_list)[1].value, (*cexpr_list)[2].value); else { error_at (loc, "wrong number of arguments to " "%<__builtin_shuffle%>"); expr.set_error (); } set_c_expr_source_range (&expr, loc, close_paren_loc); break; } case RID_AT_SELECTOR: { gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } tree sel = c_parser_objc_selector_arg (parser); location_t close_loc = c_parser_peek_token (parser)->location; parens.skip_until_found_close (parser); expr.value = objc_build_selector_expr (loc, sel); set_c_expr_source_range (&expr, loc, close_loc); } break; case RID_AT_PROTOCOL: { gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.set_error (); break; } tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); location_t close_loc = c_parser_peek_token (parser)->location; parens.skip_until_found_close (parser); expr.value = objc_build_protocol_expr (id); set_c_expr_source_range (&expr, loc, close_loc); } break; case RID_AT_ENCODE: { /* Extension to support C-structures in the archiver. 
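   For example, @encode(int) yields the encoding of type int as a
   string constant.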
*/ gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); matching_parens parens; if (!parens.require_open (parser)) { expr.set_error (); break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.set_error (); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } location_t close_loc = c_parser_peek_token (parser)->location; parens.skip_until_found_close (parser); tree type = groktypename (t1, NULL, NULL); expr.value = objc_build_encode_expr (type); set_c_expr_source_range (&expr, loc, close_loc); } break; case RID_GENERIC: expr = c_parser_generic_selection (parser); break; default: c_parser_error (parser, "expected expression"); expr.set_error (); break; } break; case CPP_OPEN_SQUARE: if (c_dialect_objc ()) { tree receiver, args; c_parser_consume_token (parser); receiver = c_parser_objc_receiver (parser); args = c_parser_objc_message_args (parser); location_t close_loc = c_parser_peek_token (parser)->location; c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = objc_build_message_expr (receiver, args); set_c_expr_source_range (&expr, loc, close_loc); break; } /* Else fall through to report error. */ /* FALLTHRU */ default: c_parser_error (parser, "expected expression"); expr.set_error (); break; } out: return c_parser_postfix_expression_after_primary (parser, EXPR_LOC_OR_LOC (expr.value, loc), expr); } /* Parse a postfix expression after a parenthesized type name: the brace-enclosed initializer of a compound literal, possibly followed by some postfix operators. This is separate because it is not possible to tell until after the type name whether a cast expression has a cast or a compound literal, or whether the operand of sizeof is a parenthesized type name or starts with a compound literal. TYPE_LOC is the location where TYPE_NAME starts--the location of the first token after the parentheses around the type name. */ static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *parser, struct c_type_name *type_name, location_t type_loc) { tree type; struct c_expr init; bool non_const; struct c_expr expr; location_t start_loc; tree type_expr = NULL_TREE; bool type_expr_const = true; check_compound_literal_type (type_loc, type_name); rich_location richloc (line_table, type_loc); start_init (NULL_TREE, NULL, 0, &richloc); type = groktypename (type_name, &type_expr, &type_expr_const); start_loc = c_parser_peek_token (parser)->location; if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type)) { error_at (type_loc, "compound literal has variable size"); type = error_mark_node; } init = c_parser_braced_init (parser, type, false, NULL); finish_init (); maybe_warn_string_init (type_loc, type, init); if (type != error_mark_node && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type)) && current_function_decl) { error ("compound literal qualified by address-space qualifier"); type = error_mark_node; } pedwarn_c90 (start_loc, OPT_Wpedantic, "ISO C90 forbids compound literals"); non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR) ? 
CONSTRUCTOR_NON_CONST (init.value) : init.original_code == C_MAYBE_CONST_EXPR); non_const |= !type_expr_const; unsigned int alignas_align = 0; if (type != error_mark_node && type_name->specs->align_log != -1) { alignas_align = 1U << type_name->specs->align_log; if (alignas_align < min_align_of_type (type)) { error_at (type_name->specs->locations[cdw_alignas], "%<_Alignas%> specifiers cannot reduce " "alignment of compound literal"); alignas_align = 0; } } expr.value = build_compound_literal (start_loc, type, init.value, non_const, alignas_align); set_c_expr_source_range (&expr, init.src_range); expr.original_code = ERROR_MARK; expr.original_type = NULL; if (type != error_mark_node && expr.value != error_mark_node && type_expr) { if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR) { gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE); C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr; } else { gcc_assert (!non_const); expr.value = build2 (C_MAYBE_CONST_EXPR, type, type_expr, expr.value); } } return c_parser_postfix_expression_after_primary (parser, start_loc, expr); } /* Callback function for sizeof_pointer_memaccess_warning to compare types. */ static bool sizeof_ptr_memacc_comptypes (tree type1, tree type2) { return comptypes (type1, type2) == 1; } /* Parse a postfix expression after the initial primary or compound literal; that is, parse a series of postfix operators. EXPR_LOC is the location of the primary expression. */ static struct c_expr c_parser_postfix_expression_after_primary (c_parser *parser, location_t expr_loc, struct c_expr expr) { struct c_expr orig_expr; tree ident, idx; location_t sizeof_arg_loc[3], comp_loc; tree sizeof_arg[3]; unsigned int literal_zero_mask; unsigned int i; vec<tree, va_gc> *exprlist; vec<tree, va_gc> *origtypes = NULL; vec<location_t> arg_loc = vNULL; location_t start; location_t finish; while (true) { location_t op_loc = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_SQUARE: /* Array reference. */ c_parser_consume_token (parser); idx = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); start = expr.get_start (); finish = parser->tokens_buf[0].location; expr.value = build_array_ref (op_loc, expr.value, idx); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_OPEN_PAREN: /* Function call. 
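   The called expression has already been parsed as the primary
   expression; here we parse the argument list and build the call.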
*/ c_parser_consume_token (parser); for (i = 0; i < 3; i++) { sizeof_arg[i] = NULL_TREE; sizeof_arg_loc[i] = UNKNOWN_LOCATION; } literal_zero_mask = 0; if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) exprlist = NULL; else exprlist = c_parser_expr_list (parser, true, false, &origtypes, sizeof_arg_loc, sizeof_arg, &arg_loc, &literal_zero_mask); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); orig_expr = expr; mark_exp_read (expr.value); if (warn_sizeof_pointer_memaccess) sizeof_pointer_memaccess_warning (sizeof_arg_loc, expr.value, exprlist, sizeof_arg, sizeof_ptr_memacc_comptypes); if (TREE_CODE (expr.value) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (expr.value) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (expr.value) == BUILT_IN_MEMSET && vec_safe_length (exprlist) == 3) { tree arg0 = (*exprlist)[0]; tree arg2 = (*exprlist)[2]; warn_for_memset (expr_loc, arg0, arg2, literal_zero_mask); } start = expr.get_start (); finish = parser->tokens_buf[0].get_finish (); expr.value = c_build_function_call_vec (expr_loc, arg_loc, expr.value, exprlist, origtypes); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) == INTEGER_CST && TREE_CODE (orig_expr.value) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P) expr.original_code = C_MAYBE_CONST_EXPR; expr.original_type = NULL; if (exprlist) { release_tree_vector (exprlist); release_tree_vector (origtypes); } arg_loc.release (); break; case CPP_DOT: /* Structure element reference. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *comp_tok = c_parser_peek_token (parser); ident = comp_tok->value; comp_loc = comp_tok->location; } else { c_parser_error (parser, "expected identifier"); expr.set_error (); expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } start = expr.get_start (); finish = c_parser_peek_token (parser)->get_finish (); c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, expr.value, ident, comp_loc); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_DEREF: /* Structure element reference. */ c_parser_consume_token (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, true, false); if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *comp_tok = c_parser_peek_token (parser); ident = comp_tok->value; comp_loc = comp_tok->location; } else { c_parser_error (parser, "expected identifier"); expr.set_error (); expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } start = expr.get_start (); finish = c_parser_peek_token (parser)->get_finish (); c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, build_indirect_ref (op_loc, expr.value, RO_ARROW), ident, comp_loc); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. 
*/ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_PLUS_PLUS: /* Postincrement. */ start = expr.get_start (); finish = c_parser_peek_token (parser)->get_finish (); c_parser_consume_token (parser); expr = default_function_array_read_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTINCREMENT_EXPR, expr.value, false); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_MINUS_MINUS: /* Postdecrement. */ start = expr.get_start (); finish = c_parser_peek_token (parser)->get_finish (); c_parser_consume_token (parser); expr = default_function_array_read_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTDECREMENT_EXPR, expr.value, false); set_c_expr_source_range (&expr, start, finish); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; default: return expr; } } } /* Parse an expression (C90 6.3.17, C99 6.5.17, C11 6.5.17). expression: assignment-expression expression , assignment-expression */ static struct c_expr c_parser_expression (c_parser *parser) { location_t tloc = c_parser_peek_token (parser)->location; struct c_expr expr; expr = c_parser_expr_no_commas (parser, NULL); if (c_parser_next_token_is (parser, CPP_COMMA)) expr = convert_lvalue_to_rvalue (tloc, expr, true, false); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; tree lhsval; location_t loc = c_parser_peek_token (parser)->location; location_t expr_loc; c_parser_consume_token (parser); expr_loc = c_parser_peek_token (parser)->location; lhsval = expr.value; while (TREE_CODE (lhsval) == COMPOUND_EXPR) lhsval = TREE_OPERAND (lhsval, 1); if (DECL_P (lhsval) || handled_component_p (lhsval)) mark_exp_read (lhsval); next = c_parser_expr_no_commas (parser, NULL); next = convert_lvalue_to_rvalue (expr_loc, next, true, false); expr.value = build_compound_expr (loc, expr.value, next.value); expr.original_code = COMPOUND_EXPR; expr.original_type = next.original_type; } return expr; } /* Parse an expression and convert functions or arrays to pointers and lvalues to rvalues. */ static struct c_expr c_parser_expression_conv (c_parser *parser) { struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (loc, expr, true, false); return expr; } /* Helper function of c_parser_expr_list. Check if IDXth (0 based) argument is a literal zero alone and if so, set it in literal_zero_mask. */ static inline void c_parser_check_literal_zero (c_parser *parser, unsigned *literal_zero_mask, unsigned int idx) { if (idx >= HOST_BITS_PER_INT) return; c_token *tok = c_parser_peek_token (parser); switch (tok->type) { case CPP_NUMBER: case CPP_CHAR: case CPP_WCHAR: case CPP_CHAR16: case CPP_CHAR32: /* If a parameter is literal zero alone, remember it for -Wmemset-transposed-args warning. */ if (integer_zerop (tok->value) && !TREE_OVERFLOW (tok->value) && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN)) *literal_zero_mask |= 1U << idx; default: break; } } /* Parse a non-empty list of expressions. If CONVERT_P, convert functions and arrays to pointers and lvalues to rvalues. If FOLD_P, fold the expressions. If LOCATIONS is non-NULL, save the locations of function arguments into this vector. 
nonempty-expr-list: assignment-expression nonempty-expr-list , assignment-expression */ static vec<tree, va_gc> * c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p, vec<tree, va_gc> **p_orig_types, location_t *sizeof_arg_loc, tree *sizeof_arg, vec<location_t> *locations, unsigned int *literal_zero_mask) { vec<tree, va_gc> *ret; vec<tree, va_gc> *orig_types; struct c_expr expr; unsigned int idx = 0; ret = make_tree_vector (); if (p_orig_types == NULL) orig_types = NULL; else orig_types = make_tree_vector (); if (literal_zero_mask) c_parser_check_literal_zero (parser, literal_zero_mask, 0); expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true, true); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); ret->quick_push (expr.value); if (orig_types) orig_types->quick_push (expr.original_type); if (locations) locations->safe_push (expr.get_location ()); if (sizeof_arg != NULL && expr.original_code == SIZEOF_EXPR) { sizeof_arg[0] = c_last_sizeof_arg; sizeof_arg_loc[0] = c_last_sizeof_loc; } while (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); if (literal_zero_mask) c_parser_check_literal_zero (parser, literal_zero_mask, idx + 1); expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true, true); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); vec_safe_push (ret, expr.value); if (orig_types) vec_safe_push (orig_types, expr.original_type); if (locations) locations->safe_push (expr.get_location ()); if (++idx < 3 && sizeof_arg != NULL && expr.original_code == SIZEOF_EXPR) { sizeof_arg[idx] = c_last_sizeof_arg; sizeof_arg_loc[idx] = c_last_sizeof_loc; } } if (orig_types) *p_orig_types = orig_types; return ret; } /* Parse Objective-C-specific constructs. */ /* Parse an objc-class-definition. objc-class-definition: @interface identifier objc-superclass[opt] objc-protocol-refs[opt] objc-class-instance-variables[opt] objc-methodprotolist @end @implementation identifier objc-superclass[opt] objc-class-instance-variables[opt] @interface identifier ( identifier ) objc-protocol-refs[opt] objc-methodprotolist @end @interface identifier ( ) objc-protocol-refs[opt] objc-methodprotolist @end @implementation identifier ( identifier ) objc-superclass: : identifier "@interface identifier (" must start "@interface identifier ( identifier ) ...": objc-methodprotolist in the first production may not start with a parenthesized identifier as a declarator of a data definition with no declaration specifiers if the objc-superclass, objc-protocol-refs and objc-class-instance-variables are omitted. */ static void c_parser_objc_class_definition (c_parser *parser, tree attributes) { bool iface_p; tree id1; tree superclass; if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)) iface_p = true; else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION)) iface_p = false; else gcc_unreachable (); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { /* We have a category or class extension. 
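   That is, @interface Foo (Bar) introduces a category and
   @interface Foo () a class extension (Foo and Bar being placeholder
   names).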
*/ tree id2; tree proto = NULL_TREE; matching_parens parens; parens.consume_open (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { if (iface_p && c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { /* We have a class extension. */ id2 = NULL_TREE; } else { c_parser_error (parser, "expected identifier or %<)%>"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return; } } else { id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } parens.skip_until_found_close (parser); if (!iface_p) { objc_start_category_implementation (id1, id2); return; } if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_category_interface (id1, id2, proto, attributes); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); return; } if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } superclass = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else superclass = NULL_TREE; if (iface_p) { tree proto = NULL_TREE; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_class_interface (id1, superclass, proto, attributes); } else objc_start_class_implementation (id1, superclass); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) c_parser_objc_class_instance_variables (parser); if (iface_p) { objc_continue_interface (); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); } else { objc_continue_implementation (); return; } } /* Parse objc-class-instance-variables. objc-class-instance-variables: { objc-instance-variable-decl-list[opt] } objc-instance-variable-decl-list: objc-visibility-spec objc-instance-variable-decl ; ; objc-instance-variable-decl-list objc-visibility-spec objc-instance-variable-decl-list objc-instance-variable-decl ; objc-instance-variable-decl-list ; objc-visibility-spec: @private @protected @public objc-instance-variable-decl: struct-declaration */ static void c_parser_objc_class_instance_variables (c_parser *parser) { gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); while (c_parser_next_token_is_not (parser, CPP_EOF)) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "extra semicolon"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the instance variables. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Parse any objc-visibility-spec. 
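   That is, one of @private, @protected, @public or @package.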
*/ if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE)) { c_parser_consume_token (parser); objc_set_visibility (OBJC_IVAR_VIS_PRIVATE); continue; } else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED)) { c_parser_consume_token (parser); objc_set_visibility (OBJC_IVAR_VIS_PROTECTED); continue; } else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC)) { c_parser_consume_token (parser); objc_set_visibility (OBJC_IVAR_VIS_PUBLIC); continue; } else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE)) { c_parser_consume_token (parser); objc_set_visibility (OBJC_IVAR_VIS_PACKAGE); continue; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external, NULL); continue; } /* Parse some comma-separated declarations. */ decls = c_parser_struct_declaration (parser); if (decls == NULL) { /* There is a syntax error. We want to skip the offending tokens up to the next ';' (included) or '}' (excluded). */ /* First, skip manually a ')' or ']'. This is because they reduce the nesting level, so c_parser_skip_until_found() wouldn't be able to skip past them. */ c_token *token = c_parser_peek_token (parser); if (token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) c_parser_consume_token (parser); /* Then, do the standard skipping. */ c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); /* We hopefully recovered. Start normal parsing again. */ parser->error = false; continue; } else { /* Comma-separated instance variables are chained together in reverse order; add them one by one. */ tree ivar = nreverse (decls); for (; ivar; ivar = DECL_CHAIN (ivar)) objc_add_instance_variable (copy_node (ivar)); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } } /* Parse an objc-class-declaration. objc-class-declaration: @class identifier-list ; */ static void c_parser_objc_class_declaration (c_parser *parser) { gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); parser->error = false; return; } id = c_parser_peek_token (parser)->value; objc_declare_class (id); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse an objc-alias-declaration. objc-alias-declaration: @compatibility_alias identifier identifier ; */ static void c_parser_objc_alias_declaration (c_parser *parser) { tree id1, id2; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_alias (id1, id2); } /* Parse an objc-protocol-definition. 
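   Two forms follow: a list of forward declarations terminated by a
   semicolon, and a full definition terminated by @end.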
objc-protocol-definition: @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end @protocol identifier-list ; "@protocol identifier ;" should be resolved as "@protocol identifier-list ;": objc-methodprotolist may not start with a semicolon in the first alternative if objc-protocol-refs are omitted. */ static void c_parser_objc_protocol_definition (c_parser *parser, tree attributes) { gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON) { /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; objc_declare_protocol (id, attributes); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } else { tree id = c_parser_peek_token (parser)->value; tree proto = NULL_TREE; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); parser->objc_pq_context = true; objc_start_protocol (id, proto, attributes); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); parser->objc_pq_context = false; objc_finish_interface (); } } /* Parse an objc-method-type. objc-method-type: + - Return true if it is a class method (+) and false if it is an instance method (-). */ static inline bool c_parser_objc_method_type (c_parser *parser) { switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: c_parser_consume_token (parser); return true; case CPP_MINUS: c_parser_consume_token (parser); return false; default: gcc_unreachable (); } } /* Parse an objc-method-definition. objc-method-definition: objc-method-type objc-method-decl ;[opt] compound-statement */ static void c_parser_objc_method_definition (c_parser *parser) { bool is_class_method = c_parser_objc_method_type (parser); tree decl, attributes = NULL_TREE, expr = NULL_TREE; parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser, is_class_method, &attributes, &expr); if (decl == error_mark_node) return; /* Bail here. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "extra semicolon in method definition specified"); } if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_error (parser, "expected %<{%>"); return; } parser->objc_pq_context = false; if (objc_start_method_definition (is_class_method, decl, attributes, expr)) { add_stmt (c_parser_compound_statement (parser)); objc_finish_method_definition (current_function_decl); } else { /* This code is executed when we find a method definition outside of an @implementation context (or invalid for other reasons). Parse the method (to keep going) but do not emit any code. */ c_parser_compound_statement (parser); } } /* Parse an objc-methodprotolist. 
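   This is the body of an @interface or @protocol, read until the
   closing @end.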
objc-methodprotolist: empty objc-methodprotolist objc-methodproto objc-methodprotolist declaration objc-methodprotolist ; @optional @required The declaration is a data definition, which may be missing declaration specifiers under the same rules and diagnostics as other data definitions outside functions, and the stray semicolon is diagnosed the same way as a stray semicolon outside a function. */ static void c_parser_objc_methodprotolist (c_parser *parser) { while (true) { /* The list is terminated by @end. */ switch (c_parser_peek_token (parser)->type) { case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PLUS: case CPP_MINUS: c_parser_objc_methodproto (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_external, NULL); break; case CPP_EOF: return; default: if (c_parser_next_token_is_keyword (parser, RID_AT_END)) return; else if (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY)) c_parser_objc_at_property_declaration (parser); else if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL)) { objc_set_method_opt (true); c_parser_consume_token (parser); } else if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED)) { objc_set_method_opt (false); c_parser_consume_token (parser); } else c_parser_declaration_or_fndef (parser, false, false, true, false, true, NULL, vNULL); break; } } } /* Parse an objc-methodproto. objc-methodproto: objc-method-type objc-method-decl ; */ static void c_parser_objc_methodproto (c_parser *parser) { bool is_class_method = c_parser_objc_method_type (parser); tree decl, attributes = NULL_TREE; /* Remember protocol qualifiers in prototypes. */ parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser, is_class_method, &attributes, NULL); /* Forget protocol qualifiers now. */ parser->objc_pq_context = false; /* Do not allow the presence of attributes to hide an erroneous method implementation in the interface section. */ if (!c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_error (parser, "expected %<;%>"); return; } if (decl != error_mark_node) objc_add_method_declaration (is_class_method, decl, attributes); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* If we are at a position that method attributes may be present, check that there are not any parsed already (a syntax error) and then collect any specified at the current location. Finally, if new attributes were present, check that the next token is legal ( ';' for decls and '{' for defs). */ static bool c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes) { bool bad = false; if (*attributes) { c_parser_error (parser, "method attributes must be specified at the end only"); *attributes = NULL_TREE; bad = true; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) *attributes = c_parser_attributes (parser); /* If there were no attributes here, just report any earlier error. */ if (*attributes == NULL_TREE || bad) return bad; /* If the attributes are followed by a ; or {, then just report any earlier error. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return bad; /* We've got attributes, but not at the end. */ c_parser_error (parser, "expected %<;%> or %<{%> after method attribute definition"); return true; } /* Parse an objc-method-decl. 
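   For example (with placeholder names),
     - (void) setName: (char *) name age: (int) age;
   declares an instance method taking two keyword arguments.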
objc-method-decl: ( objc-type-name ) objc-selector objc-selector ( objc-type-name ) objc-keyword-selector objc-optparmlist objc-keyword-selector objc-optparmlist attributes objc-keyword-selector: objc-keyword-decl objc-keyword-selector objc-keyword-decl objc-keyword-decl: objc-selector : ( objc-type-name ) identifier objc-selector : identifier : ( objc-type-name ) identifier : identifier objc-optparmlist: objc-optparms objc-optellipsis objc-optparms: empty objc-opt-parms , parameter-declaration objc-optellipsis: empty , ... */ static tree c_parser_objc_method_decl (c_parser *parser, bool is_class_method, tree *attributes, tree *expr) { tree type = NULL_TREE; tree sel; tree parms = NULL_TREE; bool ellipsis = false; bool attr_err = false; *attributes = NULL_TREE; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { matching_parens parens; parens.consume_open (parser); type = c_parser_objc_type_name (parser); parens.skip_until_found_close (parser); } sel = c_parser_objc_selector (parser); /* If there is no selector, or a colon follows, we have an objc-keyword-selector. If there is a selector, and a colon does not follow, that selector ends the objc-method-decl. */ if (!sel || c_parser_next_token_is (parser, CPP_COLON)) { tree tsel = sel; tree list = NULL_TREE; while (true) { tree atype = NULL_TREE, id, keyworddecl; tree param_attr = NULL_TREE; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) break; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); atype = c_parser_objc_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } /* New ObjC allows attributes on method parameters. */ if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) param_attr = c_parser_attributes (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return error_mark_node; } id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); keyworddecl = objc_build_keyword_decl (tsel, atype, id, param_attr); list = chainon (list, keyworddecl); tsel = c_parser_objc_selector (parser); if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ; /* Parse the optional parameter list. Optional Objective-C method parameters follow the C syntax, and may include '...' to denote a variable number of arguments. */ parms = make_node (TREE_LIST); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_parm *parm; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis = true; c_parser_consume_token (parser); attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ; break; } parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) break; parms = chainon (parms, build_tree_list (NULL_TREE, grokparm (parm, expr))); } sel = list; } else attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ; if (sel == NULL) { c_parser_error (parser, "objective-c method declaration is expected"); return error_mark_node; } if (attr_err) return error_mark_node; return objc_build_method_signature (is_class_method, type, sel, parms, ellipsis); } /* Parse an objc-type-name. 
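   For example, (oneway void) or (out char **), as seen in method
   return and parameter types.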
   objc-type-name:
     objc-type-qualifiers[opt] type-name
     objc-type-qualifiers[opt]

   objc-type-qualifiers:
     objc-type-qualifier
     objc-type-qualifiers objc-type-qualifier

   objc-type-qualifier: one of
     in out inout bycopy byref oneway
*/
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *type_name = NULL;
  tree type = NULL_TREE;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_KEYWORD
          && (token->keyword == RID_IN
              || token->keyword == RID_OUT
              || token->keyword == RID_INOUT
              || token->keyword == RID_BYCOPY
              || token->keyword == RID_BYREF
              || token->keyword == RID_ONEWAY))
        {
          quals = chainon (build_tree_list (NULL_TREE, token->value), quals);
          c_parser_consume_token (parser);
        }
      else
        break;
    }
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
    type_name = c_parser_type_name (parser);
  if (type_name)
    type = groktypename (type_name, NULL, NULL);

  /* If the type is unknown, an error has already been produced and we
     need to recover from it.  In that case, use NULL_TREE for the
     type, as if no type had been specified; this will use the default
     type ('id') which is good for error recovery.  */
  if (type == error_mark_node)
    type = NULL_TREE;

  return build_tree_list (quals, type);
}

/* Parse objc-protocol-refs.

   objc-protocol-refs:
     < identifier-list >
*/
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  tree list = NULL_TREE;
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  while (true)
    {
      tree id;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
        {
          c_parser_error (parser, "expected identifier");
          break;
        }
      id = c_parser_peek_token (parser)->value;
      list = chainon (list, build_tree_list (NULL_TREE, id));
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);
      else
        break;
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return list;
}

/* Parse an objc-try-catch-finally-statement.

   objc-try-catch-finally-statement:
     @try compound-statement objc-catch-list[opt]
     @try compound-statement objc-catch-list[opt] @finally compound-statement

   objc-catch-list:
     @catch ( objc-catch-parameter-declaration ) compound-statement
     objc-catch-list @catch ( objc-catch-parameter-declaration )
       compound-statement

   objc-catch-parameter-declaration:
     parameter-declaration
     '...'

   where '...' is to be interpreted literally, that is, it means
   CPP_ELLIPSIS.

   PS: This function is identical to
   cp_parser_objc_try_catch_finally_statement for C++.  Keep them in
   sync.  */
static void
c_parser_objc_try_catch_finally_statement (c_parser *parser)
{
  location_t location;
  tree stmt;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  location = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (location);
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (location, stmt);

  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;

      c_parser_consume_token (parser);
      matching_parens parens;
      /* Remember whether the '(' was really there; the recovery code
         below relies on this flag meaning "open parenthesis seen".  */
      if (parens.require_open (parser))
        seen_open_paren = true;
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
        {
          /* We have "@catch (...)" (where the '...' are literally
             what is in the code).  Skip the '...'.
             parameter_declaration is set to NULL_TREE, and
             objc_begin_catch_clause() knows that that means
             '...'.  */
          c_parser_consume_token (parser);
          parameter_declaration = NULL_TREE;
        }
      else
        {
          /* We have "@catch (NSException *exception)" or something
             like that.  Parse the parameter declaration.  */
          parm = c_parser_parameter_declaration (parser, NULL_TREE);
          if (parm == NULL)
            parameter_declaration = error_mark_node;
          else
            parameter_declaration = grokparm (parm, NULL);
        }
      if (seen_open_paren)
        parens.require_close (parser);
      else
        {
          /* If there was no open parenthesis, we are recovering from
             an error, and we are trying to figure out what mistake
             the user has made.  */

          /* If there is an immediate closing parenthesis, the user
             probably forgot the opening one (ie, they typed
             "@catch NSException *e)".  Parse the closing parenthesis
             and keep going.  */
          if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
            c_parser_consume_token (parser);

          /* If there is no immediate closing parenthesis, the user
             probably doesn't know that parentheses are required at
             all (ie, they typed "@catch NSException *e").  So, just
             forget about the closing parenthesis and keep going.  */
        }
      objc_begin_catch_clause (parameter_declaration);
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
        c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      c_parser_consume_token (parser);
      location = c_parser_peek_token (parser)->location;
      stmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (location, stmt);
    }
  objc_finish_try_stmt ();
}

/* Parse an objc-synchronized-statement.

   objc-synchronized-statement:
     @synchronized ( expression ) compound-statement
*/
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  location_t loc;
  tree expr, stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  loc = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (loc);
  matching_parens parens;
  if (parens.require_open (parser))
    {
      struct c_expr ce = c_parser_expression (parser);
      ce = convert_lvalue_to_rvalue (loc, ce, false, false);
      expr = ce.value;
      expr = c_fully_fold (expr, false, NULL);
      parens.skip_until_found_close (parser);
    }
  else
    expr = error_mark_node;
  stmt = c_parser_compound_statement (parser);
  objc_build_synchronized (loc, expr, stmt);
}

/* Parse an objc-selector; return NULL_TREE without an error if the
   next token is not an objc-selector.

   objc-selector:
     identifier
     one of
       enum struct union if else while do for switch case default
       break continue return goto asm sizeof typeof __alignof
       unsigned long const short volatile signed restrict _Complex
       in out inout bycopy byref oneway int char float double void
       _Bool _Atomic

   ??? Why this selection of keywords but not, for example, storage
   class specifiers?
*/ static tree c_parser_objc_selector (c_parser *parser) { c_token *token = c_parser_peek_token (parser); tree value = token->value; if (token->type == CPP_NAME) { c_parser_consume_token (parser); return value; } if (token->type != CPP_KEYWORD) return NULL_TREE; switch (token->keyword) { case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_IF: case RID_ELSE: case RID_WHILE: case RID_DO: case RID_FOR: case RID_SWITCH: case RID_CASE: case RID_DEFAULT: case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO: case RID_ASM: case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_SHORT: case RID_VOLATILE: case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX: case RID_IN: case RID_OUT: case RID_INOUT: case RID_BYCOPY: case RID_BYREF: case RID_ONEWAY: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: CASE_RID_FLOATN_NX: case RID_VOID: case RID_BOOL: case RID_ATOMIC: case RID_AUTO_TYPE: case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: c_parser_consume_token (parser); return value; default: return NULL_TREE; } } /* Parse an objc-selector-arg. objc-selector-arg: objc-selector objc-keywordname-list objc-keywordname-list: objc-keywordname objc-keywordname-list objc-keywordname objc-keywordname: objc-selector : : */ static tree c_parser_objc_selector_arg (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return list; list = chainon (list, build_tree_list (sel, NULL_TREE)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-receiver. objc-receiver: expression class-name type-name */ static tree c_parser_objc_receiver (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_peek_token (parser)->type == CPP_NAME && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); return objc_get_class_reference (id); } struct c_expr ce = c_parser_expression (parser); ce = convert_lvalue_to_rvalue (loc, ce, false, false); return c_fully_fold (ce.value, false, NULL); } /* Parse objc-message-args. objc-message-args: objc-selector objc-keywordarg-list objc-keywordarg-list: objc-keywordarg objc-keywordarg-list objc-keywordarg objc-keywordarg: objc-selector : objc-keywordexpr : objc-keywordexpr */ static tree c_parser_objc_message_args (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { tree keywordexpr; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return error_mark_node; keywordexpr = c_parser_objc_keywordexpr (parser); list = chainon (list, build_tree_list (sel, keywordexpr)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-keywordexpr. objc-keywordexpr: nonempty-expr-list */ static tree c_parser_objc_keywordexpr (c_parser *parser) { tree ret; vec<tree, va_gc> *expr_list = c_parser_expr_list (parser, true, true, NULL, NULL, NULL, NULL); if (vec_safe_length (expr_list) == 1) { /* Just return the expression, remove a level of indirection. 
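     For instance, in a hypothetical message send '[obj setX: 5]' the
     keyword expression for 'setX:' is the single expression '5', which
     is returned directly rather than wrapped in a one-element
     TREE_LIST.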
     */
      ret = (*expr_list)[0];
    }
  else
    {
      /* We have a comma expression, we will collapse later.  */
      ret = build_tree_list_vec (expr_list);
    }
  release_tree_vector (expr_list);
  return ret;
}

/* A check, needed in several places, that ObjC interface,
   implementation or method definitions are not prefixed by incorrect
   items.  */
static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
                                           struct c_declspecs *specs)
{
  if (!specs->declspecs_seen_p || specs->non_sc_seen_p
      || specs->typespec_kind != ctsk_none)
    {
      c_parser_error (parser,
                      "no type or storage class may be specified here,");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return true;
    }
  return false;
}

/* Parse an Objective-C @property declaration.  The syntax is:

   objc-property-declaration:
     '@property' objc-property-attributes[opt] struct-declaration ;

   objc-property-attributes:
     '(' objc-property-attribute-list ')'

   objc-property-attribute-list:
     objc-property-attribute
     objc-property-attribute-list, objc-property-attribute

   objc-property-attribute
     'getter' = identifier
     'setter' = identifier
     'readonly'
     'readwrite'
     'assign'
     'retain'
     'copy'
     'nonatomic'

   For example:
     @property NSString *name;
     @property (readonly) id object;
     @property (retain, nonatomic, getter=getTheName) id name;
     @property int a, b, c;

   PS: This function is identical to
   cp_parser_objc_at_property_declaration for C++.  Keep them in
   sync.  */
static void
c_parser_objc_at_property_declaration (c_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;

  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;

  loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY));

  c_parser_consume_token (parser);  /* Eat '@property'.  */

  /* Parse the optional attribute list...  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;

      /* Eat the '('.  */
      parens.consume_open (parser);

      /* Property attribute keywords are valid now.
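         While objc_property_attr_context is set, names such as
         'getter', 'setter' and 'readonly' lex as the corresponding
         RID_* keywords; outside this context they remain ordinary
         identifiers, so e.g. 'int getter;' elsewhere is unaffected.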
*/ parser->objc_property_attr_context = true; while (true) { bool syntax_error = false; c_token *token = c_parser_peek_token (parser); enum rid keyword; if (token->type != CPP_KEYWORD) { if (token->type == CPP_CLOSE_PAREN) c_parser_error (parser, "expected identifier"); else { c_parser_consume_token (parser); c_parser_error (parser, "unknown property attribute"); } break; } keyword = token->keyword; c_parser_consume_token (parser); switch (keyword) { case RID_ASSIGN: property_assign = true; break; case RID_COPY: property_copy = true; break; case RID_NONATOMIC: property_nonatomic = true; break; case RID_READONLY: property_readonly = true; break; case RID_READWRITE: property_readwrite = true; break; case RID_RETAIN: property_retain = true; break; case RID_GETTER: case RID_SETTER: if (c_parser_next_token_is_not (parser, CPP_EQ)) { if (keyword == RID_GETTER) c_parser_error (parser, "missing %<=%> (after %<getter%> attribute)"); else c_parser_error (parser, "missing %<=%> (after %<setter%> attribute)"); syntax_error = true; break; } c_parser_consume_token (parser); /* eat the = */ if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); syntax_error = true; break; } if (keyword == RID_SETTER) { if (property_setter_ident != NULL_TREE) c_parser_error (parser, "the %<setter%> attribute may only be specified once"); else property_setter_ident = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COLON)) c_parser_error (parser, "setter name must terminate with %<:%>"); else c_parser_consume_token (parser); } else { if (property_getter_ident != NULL_TREE) c_parser_error (parser, "the %<getter%> attribute may only be specified once"); else property_getter_ident = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } break; default: c_parser_error (parser, "unknown property attribute"); syntax_error = true; break; } if (syntax_error) break; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } parser->objc_property_attr_context = false; parens.skip_until_found_close (parser); } /* ... and the property declaration(s). */ properties = c_parser_struct_declaration (parser); if (properties == error_mark_node) { c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); parser->error = false; return; } if (properties == NULL_TREE) c_parser_error (parser, "expected identifier"); else { /* Comma-separated properties are chained together in reverse order; add them one by one. */ properties = nreverse (properties); for (; properties; properties = TREE_CHAIN (properties)) objc_add_property_declaration (loc, copy_node (properties), property_readonly, property_readwrite, property_assign, property_retain, property_copy, property_nonatomic, property_getter_ident, property_setter_ident); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); parser->error = false; } /* Parse an Objective-C @synthesize declaration. The syntax is: objc-synthesize-declaration: @synthesize objc-synthesize-identifier-list ; objc-synthesize-identifier-list: objc-synthesize-identifier objc-synthesize-identifier-list, objc-synthesize-identifier objc-synthesize-identifier identifier identifier = identifier For example: @synthesize MyProperty; @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty; PS: This function is identical to cp_parser_objc_at_synthesize_declaration for C++. Keep them in sync. 
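   In 'AnotherProperty=MyIvar' above, the property is backed by the
   instance variable MyIvar; when no '=' is given, the instance
   variable is assumed to have the same name as the property.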
*/ static void c_parser_objc_at_synthesize_declaration (c_parser *parser) { tree list = NULL_TREE; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE)); loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); while (true) { tree property, ivar; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); /* Once we find the semicolon, we can resume normal parsing. We have to reset parser->error manually because c_parser_skip_until_found() won't reset it for us if the next token is precisely a semicolon. */ parser->error = false; return; } property = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); parser->error = false; return; } ivar = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else ivar = NULL_TREE; list = chainon (list, build_tree_list (ivar, property)); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_add_synthesize_declaration (loc, list); } /* Parse an Objective-C @dynamic declaration. The syntax is: objc-dynamic-declaration: @dynamic identifier-list ; For example: @dynamic MyProperty; @dynamic MyProperty, AnotherProperty; PS: This function is identical to cp_parser_objc_at_dynamic_declaration for C++. Keep them in sync. */ static void c_parser_objc_at_dynamic_declaration (c_parser *parser) { tree list = NULL_TREE; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC)); loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); while (true) { tree property; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); parser->error = false; return; } property = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, property)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_add_dynamic_declaration (loc, list); } /* Parse a pragma GCC ivdep. */ static bool c_parse_pragma_ivdep (c_parser *parser) { c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); return true; } /* Parse a pragma GCC unroll. */ static unsigned short c_parser_pragma_unroll (c_parser *parser) { unsigned short unroll; c_parser_consume_pragma (parser); location_t location = c_parser_peek_token (parser)->location; tree expr = c_parser_expr_no_commas (parser, NULL).value; mark_exp_read (expr); expr = c_fully_fold (expr, false, NULL); HOST_WIDE_INT lunroll = 0; if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)) || TREE_CODE (expr) != INTEGER_CST || (lunroll = tree_to_shwi (expr)) < 0 || lunroll >= USHRT_MAX) { error_at (location, "%<#pragma GCC unroll%> requires an" " assignment-expression that evaluates to a non-negative" " integral constant less than %u", USHRT_MAX); unroll = 0; } else { unroll = (unsigned short)lunroll; if (unroll == 0) unroll = 1; } c_parser_skip_to_pragma_eol (parser); return unroll; } /* Handle pragmas. 
Some OpenMP pragmas are associated with, and therefore should be considered, statements. ALLOW_STMT is true if we're within the context of a function and such pragmas are to be allowed. Returns true if we actually parsed such a pragma. */ static bool c_parser_pragma (c_parser *parser, enum pragma_context context, bool *if_p) { unsigned int id; const char *construct = NULL; id = c_parser_peek_token (parser)->pragma_kind; gcc_assert (id != PRAGMA_NONE); switch (id) { case PRAGMA_OACC_DECLARE: c_parser_oacc_declare (parser); return false; case PRAGMA_OACC_ENTER_DATA: if (context != pragma_compound) { construct = "acc enter data"; in_compound: if (context == pragma_stmt) { error_at (c_parser_peek_token (parser)->location, "%<#pragma %s%> may only be used in compound " "statements", construct); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } goto bad_stmt; } c_parser_oacc_enter_exit_data (parser, true); return false; case PRAGMA_OACC_EXIT_DATA: if (context != pragma_compound) { construct = "acc exit data"; goto in_compound; } c_parser_oacc_enter_exit_data (parser, false); return false; case PRAGMA_OACC_ROUTINE: if (context != pragma_external) { error_at (c_parser_peek_token (parser)->location, "%<#pragma acc routine%> must be at file scope"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } c_parser_oacc_routine (parser, context); return false; case PRAGMA_OACC_UPDATE: if (context != pragma_compound) { construct = "acc update"; goto in_compound; } c_parser_oacc_update (parser); return false; case PRAGMA_OMP_BARRIER: if (context != pragma_compound) { construct = "omp barrier"; goto in_compound; } c_parser_omp_barrier (parser); return false; case PRAGMA_OMP_FLUSH: if (context != pragma_compound) { construct = "omp flush"; goto in_compound; } c_parser_omp_flush (parser); return false; case PRAGMA_OMP_TASKWAIT: if (context != pragma_compound) { construct = "omp taskwait"; goto in_compound; } c_parser_omp_taskwait (parser); return false; case PRAGMA_OMP_TASKYIELD: if (context != pragma_compound) { construct = "omp taskyield"; goto in_compound; } c_parser_omp_taskyield (parser); return false; case PRAGMA_OMP_CANCEL: if (context != pragma_compound) { construct = "omp cancel"; goto in_compound; } c_parser_omp_cancel (parser); return false; case PRAGMA_OMP_CANCELLATION_POINT: c_parser_omp_cancellation_point (parser, context); return false; case PRAGMA_OMP_THREADPRIVATE: c_parser_omp_threadprivate (parser); return false; case PRAGMA_OMP_TARGET: return c_parser_omp_target (parser, context, if_p); case PRAGMA_OMP_END_DECLARE_TARGET: c_parser_omp_end_declare_target (parser); return false; case PRAGMA_OMP_SECTION: error_at (c_parser_peek_token (parser)->location, "%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; case PRAGMA_OMP_DECLARE: c_parser_omp_declare (parser, context); return false; case PRAGMA_OMP_ORDERED: return c_parser_omp_ordered (parser, context, if_p); case PRAGMA_IVDEP: { const bool ivdep = c_parse_pragma_ivdep (parser); unsigned short unroll; if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_UNROLL) unroll = c_parser_pragma_unroll (parser); else unroll = 0; if (!c_parser_next_token_is_keyword (parser, RID_FOR) && !c_parser_next_token_is_keyword (parser, RID_WHILE) && !c_parser_next_token_is_keyword (parser, RID_DO)) { c_parser_error (parser, "for, while or do statement expected"); return false; } if (c_parser_next_token_is_keyword 
(parser, RID_FOR)) c_parser_for_statement (parser, ivdep, unroll, if_p); else if (c_parser_next_token_is_keyword (parser, RID_WHILE)) c_parser_while_statement (parser, ivdep, unroll, if_p); else c_parser_do_statement (parser, ivdep, unroll); } return false; case PRAGMA_UNROLL: { unsigned short unroll = c_parser_pragma_unroll (parser); bool ivdep; if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_IVDEP) ivdep = c_parse_pragma_ivdep (parser); else ivdep = false; if (!c_parser_next_token_is_keyword (parser, RID_FOR) && !c_parser_next_token_is_keyword (parser, RID_WHILE) && !c_parser_next_token_is_keyword (parser, RID_DO)) { c_parser_error (parser, "for, while or do statement expected"); return false; } if (c_parser_next_token_is_keyword (parser, RID_FOR)) c_parser_for_statement (parser, ivdep, unroll, if_p); else if (c_parser_next_token_is_keyword (parser, RID_WHILE)) c_parser_while_statement (parser, ivdep, unroll, if_p); else c_parser_do_statement (parser, ivdep, unroll); } return false; case PRAGMA_GCC_PCH_PREPROCESS: c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; case PRAGMA_OACC_WAIT: if (context != pragma_compound) { construct = "acc wait"; goto in_compound; } /* FALL THROUGH. */ default: if (id < PRAGMA_FIRST_EXTERNAL) { if (context != pragma_stmt && context != pragma_compound) { bad_stmt: c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } c_parser_omp_construct (parser, if_p); return true; } break; } c_parser_consume_pragma (parser); c_invoke_pragma_handler (id); /* Skip to EOL, but suppress any error message. Those will have been generated by the handler routine through calling error, as opposed to calling c_parser_error. */ parser->error = true; c_parser_skip_to_pragma_eol (parser); return false; } /* The interface the pragma parsers have to the lexer. */ enum cpp_ttype pragma_lex (tree *value, location_t *loc) { c_token *tok = c_parser_peek_token (the_parser); enum cpp_ttype ret = tok->type; *value = tok->value; if (loc) *loc = tok->location; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else { if (ret == CPP_KEYWORD) ret = CPP_NAME; c_parser_consume_token (the_parser); } return ret; } static void c_parser_pragma_pch_preprocess (c_parser *parser) { tree name = NULL; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_STRING)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else c_parser_error (parser, "expected string literal"); c_parser_skip_to_pragma_eol (parser); if (name) c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name)); } /* OpenACC and OpenMP parsing routines. */ /* Returns name of the next clause. If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. Otherwise appropriate pragma_omp_clause is returned and the token is consumed. 
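   For example, while parsing a hypothetical '#pragma omp parallel
   if (n > 1) num_threads (4)', successive calls return
   PRAGMA_OMP_CLAUSE_IF and then PRAGMA_OMP_CLAUSE_NUM_THREADS; only
   the clause name is consumed here, the parenthesized arguments being
   left for the per-clause parsers below.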
*/ static pragma_omp_clause c_parser_omp_clause_name (c_parser *parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (c_parser_next_token_is_keyword (parser, RID_AUTO)) result = PRAGMA_OACC_CLAUSE_AUTO; else if (c_parser_next_token_is_keyword (parser, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (c_parser_next_token_is_keyword (parser, RID_FOR)) result = PRAGMA_OMP_CLAUSE_FOR; else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'a': if (!strcmp ("aligned", p)) result = PRAGMA_OMP_CLAUSE_ALIGNED; else if (!strcmp ("async", p)) result = PRAGMA_OACC_CLAUSE_ASYNC; break; case 'c': if (!strcmp ("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp ("copy", p)) result = PRAGMA_OACC_CLAUSE_COPY; else if (!strcmp ("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp ("copyout", p)) result = PRAGMA_OACC_CLAUSE_COPYOUT; else if (!strcmp ("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; else if (!strcmp ("create", p)) result = PRAGMA_OACC_CLAUSE_CREATE; break; case 'd': if (!strcmp ("defaultmap", p)) result = PRAGMA_OMP_CLAUSE_DEFAULTMAP; else if (!strcmp ("delete", p)) result = PRAGMA_OACC_CLAUSE_DELETE; else if (!strcmp ("depend", p)) result = PRAGMA_OMP_CLAUSE_DEPEND; else if (!strcmp ("device", p)) result = PRAGMA_OMP_CLAUSE_DEVICE; else if (!strcmp ("deviceptr", p)) result = PRAGMA_OACC_CLAUSE_DEVICEPTR; else if (!strcmp ("device_resident", p)) result = PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT; else if (!strcmp ("dist_schedule", p)) result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE; break; case 'f': if (!strcmp ("final", p)) result = PRAGMA_OMP_CLAUSE_FINAL; else if (!strcmp ("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; else if (!strcmp ("from", p)) result = PRAGMA_OMP_CLAUSE_FROM; break; case 'g': if (!strcmp ("gang", p)) result = PRAGMA_OACC_CLAUSE_GANG; else if (!strcmp ("grainsize", p)) result = PRAGMA_OMP_CLAUSE_GRAINSIZE; break; case 'h': if (!strcmp ("hint", p)) result = PRAGMA_OMP_CLAUSE_HINT; else if (!strcmp ("host", p)) result = PRAGMA_OACC_CLAUSE_HOST; break; case 'i': if (!strcmp ("inbranch", p)) result = PRAGMA_OMP_CLAUSE_INBRANCH; else if (!strcmp ("independent", p)) result = PRAGMA_OACC_CLAUSE_INDEPENDENT; else if (!strcmp ("is_device_ptr", p)) result = PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR; break; case 'l': if (!strcmp ("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; else if (!strcmp ("linear", p)) result = PRAGMA_OMP_CLAUSE_LINEAR; else if (!strcmp ("link", p)) result = PRAGMA_OMP_CLAUSE_LINK; break; case 'm': if (!strcmp ("map", p)) result = PRAGMA_OMP_CLAUSE_MAP; else if (!strcmp ("mergeable", p)) result = PRAGMA_OMP_CLAUSE_MERGEABLE; break; case 'n': if (!strcmp ("nogroup", p)) result = PRAGMA_OMP_CLAUSE_NOGROUP; else if (!strcmp ("notinbranch", p)) result = PRAGMA_OMP_CLAUSE_NOTINBRANCH; else if (!strcmp ("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (!strcmp ("num_gangs", p)) result = PRAGMA_OACC_CLAUSE_NUM_GANGS; else if (!strcmp ("num_tasks", p)) result = PRAGMA_OMP_CLAUSE_NUM_TASKS; else if (!strcmp ("num_teams", p)) result = PRAGMA_OMP_CLAUSE_NUM_TEAMS; else if (!strcmp ("num_threads", p)) result = PRAGMA_OMP_CLAUSE_NUM_THREADS; else if (!strcmp ("num_workers", p)) result = PRAGMA_OACC_CLAUSE_NUM_WORKERS; break; case 'o': if (!strcmp ("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'p': if 
(!strcmp ("parallel", p)) result = PRAGMA_OMP_CLAUSE_PARALLEL; else if (!strcmp ("present", p)) result = PRAGMA_OACC_CLAUSE_PRESENT; else if (!strcmp ("present_or_copy", p) || !strcmp ("pcopy", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY; else if (!strcmp ("present_or_copyin", p) || !strcmp ("pcopyin", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN; else if (!strcmp ("present_or_copyout", p) || !strcmp ("pcopyout", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT; else if (!strcmp ("present_or_create", p) || !strcmp ("pcreate", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE; else if (!strcmp ("priority", p)) result = PRAGMA_OMP_CLAUSE_PRIORITY; else if (!strcmp ("private", p)) result = PRAGMA_OMP_CLAUSE_PRIVATE; else if (!strcmp ("proc_bind", p)) result = PRAGMA_OMP_CLAUSE_PROC_BIND; break; case 'r': if (!strcmp ("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp ("safelen", p)) result = PRAGMA_OMP_CLAUSE_SAFELEN; else if (!strcmp ("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp ("sections", p)) result = PRAGMA_OMP_CLAUSE_SECTIONS; else if (!strcmp ("seq", p)) result = PRAGMA_OACC_CLAUSE_SEQ; else if (!strcmp ("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; else if (!strcmp ("simd", p)) result = PRAGMA_OMP_CLAUSE_SIMD; else if (!strcmp ("simdlen", p)) result = PRAGMA_OMP_CLAUSE_SIMDLEN; else if (!strcmp ("self", p)) result = PRAGMA_OACC_CLAUSE_SELF; break; case 't': if (!strcmp ("taskgroup", p)) result = PRAGMA_OMP_CLAUSE_TASKGROUP; else if (!strcmp ("thread_limit", p)) result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT; else if (!strcmp ("threads", p)) result = PRAGMA_OMP_CLAUSE_THREADS; else if (!strcmp ("tile", p)) result = PRAGMA_OACC_CLAUSE_TILE; else if (!strcmp ("to", p)) result = PRAGMA_OMP_CLAUSE_TO; break; case 'u': if (!strcmp ("uniform", p)) result = PRAGMA_OMP_CLAUSE_UNIFORM; else if (!strcmp ("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; else if (!strcmp ("use_device", p)) result = PRAGMA_OACC_CLAUSE_USE_DEVICE; else if (!strcmp ("use_device_ptr", p)) result = PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR; break; case 'v': if (!strcmp ("vector", p)) result = PRAGMA_OACC_CLAUSE_VECTOR; else if (!strcmp ("vector_length", p)) result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH; break; case 'w': if (!strcmp ("wait", p)) result = PRAGMA_OACC_CLAUSE_WAIT; else if (!strcmp ("worker", p)) result = PRAGMA_OACC_CLAUSE_WORKER; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) c_parser_consume_token (parser); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause (tree clauses, enum omp_clause_code code, const char *name) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == code) { location_t loc = OMP_CLAUSE_LOCATION (c); error_at (loc, "too many %qs clauses", name); break; } } /* OpenACC 2.0 Parse wait clause or wait directive parameters. 
*/ static tree c_parser_oacc_wait_list (c_parser *parser, location_t clause_loc, tree list) { vec<tree, va_gc> *args; tree t, args_tree; matching_parens parens; if (!parens.require_open (parser)) return list; args = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL); if (args->length () == 0) { c_parser_error (parser, "expected integer expression before ')'"); release_tree_vector (args); return list; } args_tree = build_tree_list_vec (args); for (t = args_tree; t; t = TREE_CHAIN (t)) { tree targ = TREE_VALUE (t); if (targ != error_mark_node) { if (!INTEGRAL_TYPE_P (TREE_TYPE (targ))) { c_parser_error (parser, "expression must be integral"); targ = error_mark_node; } else { tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT); OMP_CLAUSE_DECL (c) = targ; OMP_CLAUSE_CHAIN (c) = list; list = c; } } } release_tree_vector (args); parens.require_close (parser); return list; } /* OpenACC 2.0, OpenMP 2.5: variable-list: identifier variable-list , identifier If KIND is nonzero, create the appropriate node and install the decl in OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is nonzero, CLAUSE_LOC is the location of the clause. If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return the list created. */ static tree c_parser_omp_variable_list (c_parser *parser, location_t clause_loc, enum omp_clause_code kind, tree list) { if (c_parser_next_token_is_not (parser, CPP_NAME) || c_parser_peek_token (parser)->id_kind != C_ID_ID) c_parser_error (parser, "expected identifier"); while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree t = lookup_name (c_parser_peek_token (parser)->value); if (t == NULL_TREE) { undeclared_variable (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value); t = error_mark_node; } c_parser_consume_token (parser); if (t == error_mark_node) ; else if (kind != 0) { switch (kind) { case OMP_CLAUSE__CACHE_: /* The OpenACC cache directive explicitly only allows "array elements or subarrays". */ if (c_parser_peek_token (parser)->type != CPP_OPEN_SQUARE) { c_parser_error (parser, "expected %<[%>"); t = error_mark_node; break; } /* FALLTHROUGH */ case OMP_CLAUSE_MAP: case OMP_CLAUSE_FROM: case OMP_CLAUSE_TO: while (c_parser_next_token_is (parser, CPP_DOT)) { location_t op_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (!c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); t = error_mark_node; break; } c_token *comp_tok = c_parser_peek_token (parser); tree ident = comp_tok->value; location_t comp_loc = comp_tok->location; c_parser_consume_token (parser); t = build_component_ref (op_loc, t, ident, comp_loc); } /* FALLTHROUGH */ case OMP_CLAUSE_DEPEND: case OMP_CLAUSE_REDUCTION: while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { tree low_bound = NULL_TREE, length = NULL_TREE; c_parser_consume_token (parser); if (!c_parser_next_token_is (parser, CPP_COLON)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); low_bound = expr.value; } if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) length = integer_one_node; else { /* Look for `:'. 
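             (This is the ':' of an array section such as a
             hypothetical 'arr[low : len]'; a plain 'arr[low]' was
             handled above by defaulting the length to one.)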
*/ if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) { t = error_mark_node; break; } if (!c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); length = expr.value; } } /* Look for the closing `]'. */ if (!c_parser_require (parser, CPP_CLOSE_SQUARE, "expected %<]%>")) { t = error_mark_node; break; } t = tree_cons (low_bound, length, t); } break; default: break; } if (t != error_mark_node) { tree u = build_omp_clause (clause_loc, kind); OMP_CLAUSE_DECL (u) = t; OMP_CLAUSE_CHAIN (u) = list; list = u; } } else list = tree_cons (t, NULL_TREE, list); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); } return list; } /* Similarly, but expect leading and trailing parenthesis. This is a very common case for OpenACC and OpenMP clauses. */ static tree c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind, tree list) { /* The clauses location. */ location_t loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { list = c_parser_omp_variable_list (parser, loc, kind, list); parens.skip_until_found_close (parser); } return list; } /* OpenACC 2.0: copy ( variable-list ) copyin ( variable-list ) copyout ( variable-list ) create ( variable-list ) delete ( variable-list ) present ( variable-list ) present_or_copy ( variable-list ) pcopy ( variable-list ) present_or_copyin ( variable-list ) pcopyin ( variable-list ) present_or_copyout ( variable-list ) pcopyout ( variable-list ) present_or_create ( variable-list ) pcreate ( variable-list ) */ static tree c_parser_oacc_data_clause (c_parser *parser, pragma_omp_clause c_kind, tree list) { enum gomp_map_kind kind; switch (c_kind) { case PRAGMA_OACC_CLAUSE_COPY: kind = GOMP_MAP_FORCE_TOFROM; break; case PRAGMA_OACC_CLAUSE_COPYIN: kind = GOMP_MAP_FORCE_TO; break; case PRAGMA_OACC_CLAUSE_COPYOUT: kind = GOMP_MAP_FORCE_FROM; break; case PRAGMA_OACC_CLAUSE_CREATE: kind = GOMP_MAP_FORCE_ALLOC; break; case PRAGMA_OACC_CLAUSE_DELETE: kind = GOMP_MAP_DELETE; break; case PRAGMA_OACC_CLAUSE_DEVICE: kind = GOMP_MAP_FORCE_TO; break; case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT: kind = GOMP_MAP_DEVICE_RESIDENT; break; case PRAGMA_OACC_CLAUSE_HOST: case PRAGMA_OACC_CLAUSE_SELF: kind = GOMP_MAP_FORCE_FROM; break; case PRAGMA_OACC_CLAUSE_LINK: kind = GOMP_MAP_LINK; break; case PRAGMA_OACC_CLAUSE_PRESENT: kind = GOMP_MAP_FORCE_PRESENT; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY: kind = GOMP_MAP_TOFROM; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN: kind = GOMP_MAP_TO; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT: kind = GOMP_MAP_FROM; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE: kind = GOMP_MAP_ALLOC; break; default: gcc_unreachable (); } tree nl, c; nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_MAP, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_SET_MAP_KIND (c, kind); return nl; } /* OpenACC 2.0: deviceptr ( variable-list ) */ static tree c_parser_oacc_data_clause_deviceptr (c_parser *parser, tree list) { location_t loc = c_parser_peek_token (parser)->location; tree vars, t; /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic c_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR, variable-list must only allow for pointer variables. 
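   For example, given hypothetical declarations 'int *p; int i;',
   'deviceptr (p)' is accepted while 'deviceptr (i)' is diagnosed by
   the pointer check below.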
*/ vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); for (t = vars; t && t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens() should construct a list of locations to go along with the var list. */ if (!VAR_P (v) && TREE_CODE (v) != PARM_DECL) error_at (loc, "%qD is not a variable", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (!POINTER_TYPE_P (TREE_TYPE (v))) error_at (loc, "%qD is not a pointer variable", v); tree u = build_omp_clause (loc, OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR); OMP_CLAUSE_DECL (u) = v; OMP_CLAUSE_CHAIN (u) = list; list = u; } return list; } /* OpenACC 2.0, OpenMP 3.0: collapse ( constant-expression ) */ static tree c_parser_omp_clause_collapse (c_parser *parser, tree list) { tree c, num = error_mark_node; HOST_WIDE_INT n; location_t loc; check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse"); check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile"); loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { num = c_parser_expr_no_commas (parser, NULL).value; parens.skip_until_found_close (parser); } if (num == error_mark_node) return list; mark_exp_read (num); num = c_fully_fold (num, false, NULL); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !tree_fits_shwi_p (num) || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = num; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: copyin ( variable-list ) */ static tree c_parser_omp_clause_copyin (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list); } /* OpenMP 2.5: copyprivate ( variable-list ) */ static tree c_parser_omp_clause_copyprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list); } /* OpenMP 2.5: default ( none | shared ) OpenACC: default ( none | present ) */ static tree c_parser_omp_clause_default (c_parser *parser, tree list, bool is_oacc) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; location_t loc = c_parser_peek_token (parser)->location; tree c; matching_parens parens; if (!parens.require_open (parser)) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 'p': if (strcmp ("present", p) != 0 || !is_oacc) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_PRESENT; break; case 's': if (strcmp ("shared", p) != 0 || is_oacc) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } c_parser_consume_token (parser); } else { invalid_kind: if (is_oacc) c_parser_error (parser, "expected %<none%> or %<present%>"); else c_parser_error (parser, "expected %<none%> or %<shared%>"); } parens.skip_until_found_close (parser); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default"); c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 2.5: firstprivate ( 
variable-list ) */ static tree c_parser_omp_clause_firstprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list); } /* OpenMP 3.1: final ( expression ) */ static tree c_parser_omp_clause_final (c_parser *parser, tree list) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree t = c_parser_paren_condition (parser); tree c; check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final"); c = build_omp_clause (loc, OMP_CLAUSE_FINAL); OMP_CLAUSE_FINAL_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } else c_parser_error (parser, "expected %<(%>"); return list; } /* OpenACC, OpenMP 2.5: if ( expression ) OpenMP 4.5: if ( directive-name-modifier : expression ) directive-name-modifier: parallel | task | taskloop | target data | target | target update | target enter data | target exit data */ static tree c_parser_omp_clause_if (c_parser *parser, tree list, bool is_omp) { location_t location = c_parser_peek_token (parser)->location; enum tree_code if_modifier = ERROR_MARK; matching_parens parens; if (!parens.require_open (parser)) return list; if (is_omp && c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); int n = 2; if (strcmp (p, "parallel") == 0) if_modifier = OMP_PARALLEL; else if (strcmp (p, "task") == 0) if_modifier = OMP_TASK; else if (strcmp (p, "taskloop") == 0) if_modifier = OMP_TASKLOOP; else if (strcmp (p, "target") == 0) { if_modifier = OMP_TARGET; if (c_parser_peek_2nd_token (parser)->type == CPP_NAME) { p = IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value); if (strcmp ("data", p) == 0) if_modifier = OMP_TARGET_DATA; else if (strcmp ("update", p) == 0) if_modifier = OMP_TARGET_UPDATE; else if (strcmp ("enter", p) == 0) if_modifier = OMP_TARGET_ENTER_DATA; else if (strcmp ("exit", p) == 0) if_modifier = OMP_TARGET_EXIT_DATA; if (if_modifier != OMP_TARGET) { n = 3; c_parser_consume_token (parser); } else { location_t loc = c_parser_peek_2nd_token (parser)->location; error_at (loc, "expected %<data%>, %<update%>, %<enter%> " "or %<exit%>"); if_modifier = ERROR_MARK; } if (if_modifier == OMP_TARGET_ENTER_DATA || if_modifier == OMP_TARGET_EXIT_DATA) { if (c_parser_peek_2nd_token (parser)->type == CPP_NAME) { p = IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value); if (strcmp ("data", p) == 0) n = 4; } if (n == 4) c_parser_consume_token (parser); else { location_t loc = c_parser_peek_2nd_token (parser)->location; error_at (loc, "expected %<data%>"); if_modifier = ERROR_MARK; } } } } if (if_modifier != ERROR_MARK) { if (c_parser_peek_2nd_token (parser)->type == CPP_COLON) { c_parser_consume_token (parser); c_parser_consume_token (parser); } else { if (n > 2) { location_t loc = c_parser_peek_2nd_token (parser)->location; error_at (loc, "expected %<:%>"); } if_modifier = ERROR_MARK; } } } tree t = c_parser_condition (parser), c; parens.skip_until_found_close (parser); for (c = list; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IF) { if (if_modifier != ERROR_MARK && OMP_CLAUSE_IF_MODIFIER (c) == if_modifier) { const char *p = NULL; switch (if_modifier) { case OMP_PARALLEL: p = "parallel"; break; case OMP_TASK: p = "task"; break; case OMP_TASKLOOP: p = "taskloop"; break; case OMP_TARGET_DATA: p = "target data"; break; case OMP_TARGET: p = "target"; break; case OMP_TARGET_UPDATE: p = "target update"; break; case OMP_TARGET_ENTER_DATA: p = "enter data"; break; case 
OMP_TARGET_EXIT_DATA: p = "exit data"; break; default: gcc_unreachable (); } error_at (location, "too many %<if%> clauses with %qs modifier", p); return list; } else if (OMP_CLAUSE_IF_MODIFIER (c) == if_modifier) { if (!is_omp) error_at (location, "too many %<if%> clauses"); else error_at (location, "too many %<if%> clauses without modifier"); return list; } else if (if_modifier == ERROR_MARK || OMP_CLAUSE_IF_MODIFIER (c) == ERROR_MARK) { error_at (location, "if any %<if%> clause has modifier, then all " "%<if%> clauses have to use modifier"); return list; } } c = build_omp_clause (location, OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = if_modifier; OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: lastprivate ( variable-list ) */ static tree c_parser_omp_clause_lastprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list); } /* OpenMP 3.1: mergeable */ static tree c_parser_omp_clause_mergeable (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; /* FIXME: Should we allow duplicates? */ check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_MERGEABLE); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: nowait */ static tree c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; location_t loc = c_parser_peek_token (parser)->location; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait"); c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: num_threads ( expression ) */ static tree c_parser_omp_clause_num_threads (c_parser *parser, tree list) { location_t num_threads_loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. */ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); protected_set_expr_location (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<num_threads%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads"); c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.5: num_tasks ( expression ) */ static tree c_parser_omp_clause_num_tasks (c_parser *parser, tree list) { location_t num_tasks_loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. 
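     The fold below builds 't <= 0'; for a constant argument such as a
     hypothetical 'num_tasks (0)' this reduces to boolean_true_node and
     is diagnosed at compile time, while runtime values pass through
     unchecked.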
*/ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); if (CAN_HAVE_LOCATION_P (c)) SET_EXPR_LOCATION (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<num_tasks%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TASKS, "num_tasks"); c = build_omp_clause (num_tasks_loc, OMP_CLAUSE_NUM_TASKS); OMP_CLAUSE_NUM_TASKS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.5: grainsize ( expression ) */ static tree c_parser_omp_clause_grainsize (c_parser *parser, tree list) { location_t grainsize_loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. */ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); if (CAN_HAVE_LOCATION_P (c)) SET_EXPR_LOCATION (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<grainsize%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_GRAINSIZE, "grainsize"); c = build_omp_clause (grainsize_loc, OMP_CLAUSE_GRAINSIZE); OMP_CLAUSE_GRAINSIZE_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.5: priority ( expression ) */ static tree c_parser_omp_clause_priority (c_parser *parser, tree list) { location_t priority_loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't non-negative. 
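     Note the LT_EXPR here, rather than the LE_EXPR used by the clauses
     above: 'priority (0)' is valid, so only strictly negative
     constants are diagnosed.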
*/ c = fold_build2_loc (expr_loc, LT_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); if (CAN_HAVE_LOCATION_P (c)) SET_EXPR_LOCATION (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<priority%> value must be non-negative"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_PRIORITY, "priority"); c = build_omp_clause (priority_loc, OMP_CLAUSE_PRIORITY); OMP_CLAUSE_PRIORITY_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.5: hint ( expression ) */ static tree c_parser_omp_clause_hint (c_parser *parser, tree list) { location_t hint_loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_HINT, "hint"); c = build_omp_clause (hint_loc, OMP_CLAUSE_HINT); OMP_CLAUSE_HINT_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 4.5: defaultmap ( tofrom : scalar ) */ static tree c_parser_omp_clause_defaultmap (c_parser *parser, tree list) { location_t loc = c_parser_peek_token (parser)->location; tree c; const char *p; matching_parens parens; if (!parens.require_open (parser)) return list; if (!c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<tofrom%>"); goto out_err; } p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "tofrom") != 0) { c_parser_error (parser, "expected %<tofrom%>"); goto out_err; } c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) goto out_err; if (!c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<scalar%>"); goto out_err; } p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "scalar") != 0) { c_parser_error (parser, "expected %<scalar%>"); goto out_err; } c_parser_consume_token (parser); parens.skip_until_found_close (parser); check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULTMAP, "defaultmap"); c = build_omp_clause (loc, OMP_CLAUSE_DEFAULTMAP); OMP_CLAUSE_CHAIN (c) = list; return c; out_err: parens.skip_until_found_close (parser); return list; } /* OpenACC 2.0: use_device ( variable-list ) OpenMP 4.5: use_device_ptr ( variable-list ) */ static tree c_parser_omp_clause_use_device_ptr (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_USE_DEVICE_PTR, list); } /* OpenMP 4.5: is_device_ptr ( variable-list ) */ static tree c_parser_omp_clause_is_device_ptr (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_IS_DEVICE_PTR, list); } /* OpenACC: num_gangs ( expression ) num_workers ( expression ) vector_length ( expression ) */ static tree c_parser_oacc_single_int_clause (c_parser *parser, omp_clause_code code, tree list) { location_t loc = c_parser_peek_token (parser)->location; matching_parens parens; if (!parens.require_open (parser)) return list; location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expression (parser); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); tree c, t = expr.value; t = c_fully_fold (t, false, NULL); 
parens.skip_until_found_close (parser); if (t == error_mark_node) return list; else if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (expr_loc, "%qs expression must be integral", omp_clause_code_name[code]); return list; } /* Attempt to statically determine when the number isn't positive. */ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); protected_set_expr_location (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%qs value must be positive", omp_clause_code_name[code]); t = integer_one_node; } check_no_duplicate_clause (list, code, omp_clause_code_name[code]); c = build_omp_clause (loc, code); OMP_CLAUSE_OPERAND (c, 0) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenACC: gang [( gang-arg-list )] worker [( [num:] int-expr )] vector [( [length:] int-expr )] where gang-arg is one of: [num:] int-expr static: size-expr and size-expr may be: * int-expr */ static tree c_parser_oacc_shape_clause (c_parser *parser, omp_clause_code kind, const char *str, tree list) { const char *id = "num"; tree ops[2] = { NULL_TREE, NULL_TREE }, c; location_t loc = c_parser_peek_token (parser)->location; if (kind == OMP_CLAUSE_VECTOR) id = "length"; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); do { c_token *next = c_parser_peek_token (parser); int idx = 0; /* Gang static argument. */ if (kind == OMP_CLAUSE_GANG && c_parser_next_token_is_keyword (parser, RID_STATIC)) { c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) goto cleanup_error; idx = 1; if (ops[idx] != NULL_TREE) { c_parser_error (parser, "too many %<static%> arguments"); goto cleanup_error; } /* Check for the '*' argument. */ if (c_parser_next_token_is (parser, CPP_MULT) && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); ops[idx] = integer_minus_one_node; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } else break; } } /* Worker num: argument and vector length: arguments. */ else if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (id, IDENTIFIER_POINTER (next->value)) == 0 && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { c_parser_consume_token (parser); /* id */ c_parser_consume_token (parser); /* ':' */ } /* Now collect the actual argument. */ if (ops[idx] != NULL_TREE) { c_parser_error (parser, "unexpected argument"); goto cleanup_error; } location_t expr_loc = c_parser_peek_token (parser)->location; c_expr cexpr = c_parser_expr_no_commas (parser, NULL); cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true); tree expr = cexpr.value; if (expr == error_mark_node) goto cleanup_error; expr = c_fully_fold (expr, false, NULL); /* Attempt to statically determine when the number isn't a positive integer. 
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))) { c_parser_error (parser, "expected integer expression"); return list; } tree c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, expr, build_int_cst (TREE_TYPE (expr), 0)); if (c == boolean_true_node) { warning_at (loc, 0, "%qs value must be positive", str); expr = integer_one_node; } ops[idx] = expr; if (kind == OMP_CLAUSE_GANG && c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } break; } while (1); if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) goto cleanup_error; } check_no_duplicate_clause (list, kind, str); c = build_omp_clause (loc, kind); if (ops[1]) OMP_CLAUSE_OPERAND (c, 1) = ops[1]; OMP_CLAUSE_OPERAND (c, 0) = ops[0]; OMP_CLAUSE_CHAIN (c) = list; return c; cleanup_error: c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } /* OpenACC: auto independent nohost seq */ static tree c_parser_oacc_simple_clause (c_parser *parser, enum omp_clause_code code, tree list) { check_no_duplicate_clause (list, code, omp_clause_code_name[code]); tree c = build_omp_clause (c_parser_peek_token (parser)->location, code); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenACC: async [( int-expr )] */ static tree c_parser_oacc_clause_async (c_parser *parser, tree list) { tree c, t; location_t loc = c_parser_peek_token (parser)->location; t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL); if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN) { c_parser_consume_token (parser); t = c_parser_expression (parser).value; if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) c_parser_error (parser, "expected integer expression"); else if (t == error_mark_node || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) return list; } else t = c_fully_fold (t, false, NULL); check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async"); c = build_omp_clause (loc, OMP_CLAUSE_ASYNC); OMP_CLAUSE_ASYNC_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; return list; } /* OpenACC 2.0: tile ( size-expr-list ) */ static tree c_parser_oacc_clause_tile (c_parser *parser, tree list) { tree c, expr = error_mark_node; location_t loc; tree tile = NULL_TREE; check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile"); check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse"); loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; do { if (tile && !c_parser_require (parser, CPP_COMMA, "expected %<,%>")) return list; if (c_parser_next_token_is (parser, CPP_MULT) && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); expr = integer_zero_node; } else { location_t expr_loc = c_parser_peek_token (parser)->location; c_expr cexpr = c_parser_expr_no_commas (parser, NULL); cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true); expr = cexpr.value; if (expr == error_mark_node) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return list; } expr = c_fully_fold (expr, false, NULL); if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)) || !tree_fits_shwi_p (expr) || tree_to_shwi (expr) <= 0) { error_at (expr_loc, "%<tile%> argument needs positive" " integral constant"); expr = integer_zero_node; } } tile = tree_cons (NULL_TREE, expr, tile); } while (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN)); /* Consume the trailing ')'. 
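     (The list built above is in reverse source order; the nreverse
     below restores it, so a hypothetical 'tile (2, 4)' yields the list
     (2, 4), with any '*' entries recorded as integer_zero_node.)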
*/
      c_parser_consume_token (parser);

      c = build_omp_clause (loc, OMP_CLAUSE_TILE);
      tile = nreverse (tile);
      OMP_CLAUSE_TILE_LIST (c) = tile;
      OMP_CLAUSE_CHAIN (c) = list;
      return c;
}

/* OpenACC:
   wait ( int-expr-list ) */

static tree
c_parser_oacc_clause_wait (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    list = c_parser_oacc_wait_list (parser, clause_loc, list);

  return list;
}

/* OpenMP 2.5:
   ordered

   OpenMP 4.5:
   ordered ( constant-expression ) */

static tree
c_parser_omp_clause_ordered (c_parser *parser, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  tree c, num = NULL_TREE;
  HOST_WIDE_INT n;
  location_t loc = c_parser_peek_token (parser)->location;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      parens.consume_open (parser);
      num = c_parser_expr_no_commas (parser, NULL).value;
      parens.skip_until_found_close (parser);
    }
  if (num == error_mark_node)
    return list;
  if (num)
    {
      mark_exp_read (num);
      num = c_fully_fold (num, false, NULL);
      if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
          || !tree_fits_shwi_p (num)
          || (n = tree_to_shwi (num)) <= 0
          || (int) n != n)
        {
          error_at (loc, "ordered argument needs positive "
                         "constant integer expression");
          return list;
        }
    }
  c = build_omp_clause (loc, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_ORDERED_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   private ( variable-list ) */

static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
}

/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && ||

   OpenMP 3.1:

   reduction-operator:
     One of: + * - & ^ | && || max min

   OpenMP 4.0:

   reduction-operator:
     One of: + * - & ^ | && ||
     identifier  */

static tree
c_parser_omp_clause_reduction (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      enum tree_code code = ERROR_MARK;
      tree reduc_id = NULL_TREE;

      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_PLUS:
          code = PLUS_EXPR;
          break;
        case CPP_MULT:
          code = MULT_EXPR;
          break;
        case CPP_MINUS:
          code = MINUS_EXPR;
          break;
        case CPP_AND:
          code = BIT_AND_EXPR;
          break;
        case CPP_XOR:
          code = BIT_XOR_EXPR;
          break;
        case CPP_OR:
          code = BIT_IOR_EXPR;
          break;
        case CPP_AND_AND:
          code = TRUTH_ANDIF_EXPR;
          break;
        case CPP_OR_OR:
          code = TRUTH_ORIF_EXPR;
          break;
        case CPP_NAME:
          {
            const char *p
              = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
            if (strcmp (p, "min") == 0)
              {
                code = MIN_EXPR;
                break;
              }
            if (strcmp (p, "max") == 0)
              {
                code = MAX_EXPR;
                break;
              }
            reduc_id = c_parser_peek_token (parser)->value;
            break;
          }
        default:
          c_parser_error (parser,
                          "expected %<+%>, %<*%>, %<-%>, %<&%>, "
                          "%<^%>, %<|%>, %<&&%>, %<||%> or identifier");
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
          return list;
        }
      c_parser_consume_token (parser);
      reduc_id = c_omp_reduction_id (code, reduc_id);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
        {
          tree nl, c;

          nl = c_parser_omp_variable_list (parser, clause_loc,
                                           OMP_CLAUSE_REDUCTION, list);
          for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
            {
              tree d = OMP_CLAUSE_DECL (c), type;
              if (TREE_CODE (d) != TREE_LIST)
                type = TREE_TYPE (d);
              else
                {
                  int cnt = 0;
                  tree t;
                  for (t = d; TREE_CODE (t) == TREE_LIST; t = TREE_CHAIN (t))
                    cnt++;
                  type = TREE_TYPE (t);
                  while (cnt > 0)
                    {
                      if (TREE_CODE (type) != POINTER_TYPE
                          && TREE_CODE (type) != ARRAY_TYPE)
                        break;
                      type = TREE_TYPE (type);
                      cnt--;
                    }
                }
              while (TREE_CODE (type) == ARRAY_TYPE)
                type = TREE_TYPE (type);
              OMP_CLAUSE_REDUCTION_CODE (c) = code;
              if (code == ERROR_MARK
                  || !(INTEGRAL_TYPE_P (type)
                       || TREE_CODE (type) == REAL_TYPE
                       || TREE_CODE (type) == COMPLEX_TYPE))
                OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
                  = c_omp_reduction_lookup (reduc_id,
                                            TYPE_MAIN_VARIANT (type));
            }

          list = nl;
        }
      parens.skip_until_found_close (parser);
    }
  return list;
}
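/* Illustrative usage (example added for exposition, not part of the
   original sources): the reduction-operator forms accepted above cover
   the built-in operators, min/max (3.1), and a user-declared reduction
   identifier (4.0), e.g.:

     #pragma omp parallel for reduction(+:sum) reduction(max:best)
     for (i = 0; i < n; i++)
       {
         sum += a[i];
         if (a[i] > best)
           best = a[i];
       }
*/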
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime | auto

   OpenMP 4.5:
   schedule ( schedule-modifier : schedule-kind )
   schedule ( schedule-modifier [ , schedule-modifier ] : schedule-kind , expression )

   schedule-modifier:
     simd
     monotonic
     nonmonotonic  */

static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;
  int modifiers = 0, nmodifiers = 0;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);

  while (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      if (strcmp ("simd", p) == 0)
        OMP_CLAUSE_SCHEDULE_SIMD (c) = 1;
      else if (strcmp ("monotonic", p) == 0)
        modifiers |= OMP_CLAUSE_SCHEDULE_MONOTONIC;
      else if (strcmp ("nonmonotonic", p) == 0)
        modifiers |= OMP_CLAUSE_SCHEDULE_NONMONOTONIC;
      else
        break;
      c_parser_consume_token (parser);
      if (nmodifiers++ == 0
          && c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);
      else
        {
          c_parser_require (parser, CPP_COLON, "expected %<:%>");
          break;
        }
    }

  if ((modifiers & (OMP_CLAUSE_SCHEDULE_MONOTONIC
                    | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
      == (OMP_CLAUSE_SCHEDULE_MONOTONIC | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
    {
      error_at (loc, "both %<monotonic%> and %<nonmonotonic%> modifiers "
                     "specified");
      modifiers = 0;
    }

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);

      switch (p[0])
        {
        case 'd':
          if (strcmp ("dynamic", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
          break;

        case 'g':
          if (strcmp ("guided", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
          break;

        case 'r':
          if (strcmp ("runtime", p) != 0)
            goto invalid_kind;
          OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
          break;

        default:
          goto invalid_kind;
        }
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;

  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      location_t here;
      c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (here, expr, false, true);
      t = expr.value;
      t = c_fully_fold (t, false, NULL);

      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
        error_at (here, "schedule %<runtime%> does not take "
                        "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
        error_at (here, "schedule %<auto%> does not take "
                        "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
        {
          /* Attempt to statically determine when the number isn't
             positive.  */
          tree s = fold_build2_loc (loc, LE_EXPR, boolean_type_node, t,
                                    build_int_cst (TREE_TYPE (t), 0));
          protected_set_expr_location (s, loc);
          if (s == boolean_true_node)
            {
              warning_at (loc, 0, "chunk size value must be positive");
              t = integer_one_node;
            }
          OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
        }
      else
        c_parser_error (parser, "expected integer expression");

      parens.skip_until_found_close (parser);
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                               "expected %<,%> or %<)%>");

  OMP_CLAUSE_SCHEDULE_KIND (c)
    = (enum omp_clause_schedule_kind)
      (OMP_CLAUSE_SCHEDULE_KIND (c) | modifiers);

  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;

 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}

/* OpenMP 2.5:
   shared ( variable-list ) */

static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
}

/* OpenMP 3.0:
   untied */

static tree
c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree c;

  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");

  c = build_omp_clause (c_parser_peek_token (parser)->location,
                        OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 4.0:
   inbranch
   notinbranch */

static tree
c_parser_omp_clause_branch (c_parser *parser ATTRIBUTE_UNUSED,
                            enum omp_clause_code code, tree list)
{
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);

  tree c = build_omp_clause (c_parser_peek_token (parser)->location, code);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 4.0:
   parallel
   for
   sections
   taskgroup */

static tree
c_parser_omp_clause_cancelkind (c_parser *parser ATTRIBUTE_UNUSED,
                                enum omp_clause_code code, tree list)
{
  tree c = build_omp_clause (c_parser_peek_token (parser)->location, code);
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 4.5:
   nogroup */

static tree
c_parser_omp_clause_nogroup (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_NOGROUP, "nogroup");
  tree c = build_omp_clause (c_parser_peek_token (parser)->location,
                             OMP_CLAUSE_NOGROUP);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 4.5:
   simd
   threads */

static tree
c_parser_omp_clause_orderedkind (c_parser *parser ATTRIBUTE_UNUSED,
                                 enum omp_clause_code code, tree list)
{
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);
  tree c = build_omp_clause (c_parser_peek_token (parser)->location, code);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
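/* Illustrative usage (example added for exposition, not part of the
   original sources): the OpenMP 4.5 modifier syntax parsed above allows
   requesting a non-monotonic dynamic schedule with a chunk size:

     #pragma omp for schedule(nonmonotonic: dynamic, 16)
     for (i = 0; i < n; i++)
       work (i);
*/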
/* OpenMP 4.0:
   num_teams ( expression ) */

static tree
c_parser_omp_clause_num_teams (c_parser *parser, tree list)
{
  location_t num_teams_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
                           build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
        {
          warning_at (expr_loc, 0, "%<num_teams%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS, "num_teams");

      c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
      OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 4.0:
   thread_limit ( expression ) */

static tree
c_parser_omp_clause_thread_limit (c_parser *parser, tree list)
{
  location_t num_thread_limit_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
                           build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
        {
          warning_at (expr_loc, 0, "%<thread_limit%> value must be positive");
          t = integer_one_node;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT,
                                 "thread_limit");

      c = build_omp_clause (num_thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
      OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}

/* OpenMP 4.0:
   aligned ( variable-list )
   aligned ( variable-list : constant-expression ) */

static tree
c_parser_omp_clause_aligned (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  nl = c_parser_omp_variable_list (parser, clause_loc,
                                   OMP_CLAUSE_ALIGNED, list);

  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree alignment = expr.value;
      alignment = c_fully_fold (alignment, false, NULL);

      if (TREE_CODE (alignment) != INTEGER_CST
          || !INTEGRAL_TYPE_P (TREE_TYPE (alignment))
          || tree_int_cst_sgn (alignment) != 1)
        {
          error_at (clause_loc, "%<aligned%> clause alignment expression must "
                                "be positive constant integer expression");
          alignment = NULL_TREE;
        }

      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
        OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment;
    }

  parens.skip_until_found_close (parser);
  return nl;
}
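/* Illustrative usage (example added for exposition, not part of the
   original sources): both clauses parsed above take an integer
   expression that must be positive:

     #pragma omp target
     #pragma omp teams num_teams(8) thread_limit(64)
     { ... }
*/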
/* OpenMP 4.0:
   linear ( variable-list )
   linear ( variable-list : expression )

   OpenMP 4.5:
   linear ( modifier ( variable-list ) )
   linear ( modifier ( variable-list ) : expression ) */

static tree
c_parser_omp_clause_linear (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c, step;
  enum omp_clause_linear_kind kind = OMP_CLAUSE_LINEAR_DEFAULT;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      if (strcmp ("val", p) == 0)
        kind = OMP_CLAUSE_LINEAR_VAL;
      if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN)
        kind = OMP_CLAUSE_LINEAR_DEFAULT;
      if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
        {
          c_parser_consume_token (parser);
          c_parser_consume_token (parser);
        }
    }

  nl = c_parser_omp_variable_list (parser, clause_loc,
                                   OMP_CLAUSE_LINEAR, list);

  if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
    parens.skip_until_found_close (parser);

  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expression (parser);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      step = expr.value;
      step = c_fully_fold (step, false, NULL);
      if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
        {
          error_at (clause_loc, "%<linear%> clause step expression must "
                                "be integral");
          step = integer_one_node;
        }
    }
  else
    step = integer_one_node;

  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    {
      OMP_CLAUSE_LINEAR_STEP (c) = step;
      OMP_CLAUSE_LINEAR_KIND (c) = kind;
    }

  parens.skip_until_found_close (parser);
  return nl;
}

/* OpenMP 4.0:
   safelen ( constant-expression ) */

static tree
c_parser_omp_clause_safelen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  t = expr.value;
  t = c_fully_fold (t, false, NULL);
  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<safelen%> clause expression must "
                            "be positive constant integer expression");
      t = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SAFELEN);
  OMP_CLAUSE_SAFELEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 4.0:
   simdlen ( constant-expression ) */

static tree
c_parser_omp_clause_simdlen (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree c, t;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  t = expr.value;
  t = c_fully_fold (t, false, NULL);
  if (TREE_CODE (t) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (t))
      || tree_int_cst_sgn (t) != 1)
    {
      error_at (clause_loc, "%<simdlen%> clause expression must "
                            "be positive constant integer expression");
      t = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (t == NULL_TREE || t == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen");

  c = build_omp_clause (clause_loc, OMP_CLAUSE_SIMDLEN);
  OMP_CLAUSE_SIMDLEN_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
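/* Illustrative usage (example added for exposition, not part of the
   original sources): the simd-related clauses parsed above combine as:

     #pragma omp simd linear(j:1) safelen(16) aligned(p:64)
     for (i = 0; i < n; i++)
       {
         p[j] = b[i];
         j++;
       }
*/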
/* OpenMP 4.5:
   vec:
     identifier [+/- integer]
     vec , identifier [+/- integer]
*/

static tree
c_parser_omp_clause_depend_sink (c_parser *parser, location_t clause_loc,
                                 tree list)
{
  tree vec = NULL;
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    {
      c_parser_error (parser, "expected identifier");
      return list;
    }

  while (c_parser_next_token_is (parser, CPP_NAME)
         && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      tree addend = NULL;

      if (t == NULL_TREE)
        {
          undeclared_variable (c_parser_peek_token (parser)->location,
                               c_parser_peek_token (parser)->value);
          t = error_mark_node;
        }

      c_parser_consume_token (parser);

      bool neg = false;
      if (c_parser_next_token_is (parser, CPP_MINUS))
        neg = true;
      else if (!c_parser_next_token_is (parser, CPP_PLUS))
        {
          addend = integer_zero_node;
          neg = false;
          goto add_to_vector;
        }
      c_parser_consume_token (parser);

      if (c_parser_next_token_is_not (parser, CPP_NUMBER))
        {
          c_parser_error (parser, "expected integer");
          return list;
        }

      addend = c_parser_peek_token (parser)->value;
      if (TREE_CODE (addend) != INTEGER_CST)
        {
          c_parser_error (parser, "expected integer");
          return list;
        }
      c_parser_consume_token (parser);

    add_to_vector:
      if (t != error_mark_node)
        {
          vec = tree_cons (addend, t, vec);
          if (neg)
            OMP_CLAUSE_DEPEND_SINK_NEGATIVE (vec) = 1;
        }

      if (c_parser_next_token_is_not (parser, CPP_COMMA))
        break;

      c_parser_consume_token (parser);
    }

  if (vec == NULL_TREE)
    return list;

  tree u = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DEPEND_KIND (u) = OMP_CLAUSE_DEPEND_SINK;
  OMP_CLAUSE_DECL (u) = nreverse (vec);
  OMP_CLAUSE_CHAIN (u) = list;
  return u;
}

/* OpenMP 4.0:
   depend ( depend-kind: variable-list )

   depend-kind:
     in | out | inout

   OpenMP 4.5:
   depend ( source )

   depend ( sink : vec ) */

static tree
c_parser_omp_clause_depend (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_INOUT;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("in", p) == 0)
        kind = OMP_CLAUSE_DEPEND_IN;
      else if (strcmp ("inout", p) == 0)
        kind = OMP_CLAUSE_DEPEND_INOUT;
      else if (strcmp ("out", p) == 0)
        kind = OMP_CLAUSE_DEPEND_OUT;
      else if (strcmp ("source", p) == 0)
        kind = OMP_CLAUSE_DEPEND_SOURCE;
      else if (strcmp ("sink", p) == 0)
        kind = OMP_CLAUSE_DEPEND_SINK;
      else
        goto invalid_kind;
    }
  else
    goto invalid_kind;

  c_parser_consume_token (parser);

  if (kind == OMP_CLAUSE_DEPEND_SOURCE)
    {
      c = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
      OMP_CLAUSE_DEPEND_KIND (c) = kind;
      OMP_CLAUSE_DECL (c) = NULL_TREE;
      OMP_CLAUSE_CHAIN (c) = list;
      parens.skip_until_found_close (parser);
      return c;
    }

  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto resync_fail;

  if (kind == OMP_CLAUSE_DEPEND_SINK)
    nl = c_parser_omp_clause_depend_sink (parser, clause_loc, list);
  else
    {
      nl = c_parser_omp_variable_list (parser, clause_loc,
                                       OMP_CLAUSE_DEPEND, list);

      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
        OMP_CLAUSE_DEPEND_KIND (c) = kind;
    }

  parens.skip_until_found_close (parser);
  return nl;

 invalid_kind:
  c_parser_error (parser, "invalid depend kind");
 resync_fail:
  parens.skip_until_found_close (parser);
  return list;
}
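/* Illustrative usage (examples added for exposition, not part of the
   original sources): task dependences and OpenMP 4.5 doacross
   dependences, both handled by the parsers above:

     #pragma omp task depend(out: buf)
     produce (buf);
     #pragma omp task depend(in: buf)
     consume (buf);

     #pragma omp for ordered(1)
     for (i = 1; i < n; i++)
       {
     #pragma omp ordered depend(sink: i - 1)
         a[i] += a[i - 1];
     #pragma omp ordered depend(source)
       }
*/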
/* OpenMP 4.0:
   map ( map-kind: variable-list )
   map ( variable-list )

   map-kind:
     alloc | to | from | tofrom

   OpenMP 4.5:
   map-kind:
     alloc | to | from | tofrom | release | delete

   map ( always [,] map-kind: variable-list ) */

static tree
c_parser_omp_clause_map (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum gomp_map_kind kind = GOMP_MAP_TOFROM;
  int always = 0;
  enum c_id_kind always_id_kind = C_ID_NONE;
  location_t always_loc = UNKNOWN_LOCATION;
  tree always_id = NULL_TREE;
  tree nl, c;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      always_id_kind = tok->id_kind;
      always_loc = tok->location;
      always_id = tok->value;
      if (strcmp ("always", p) == 0)
        {
          c_token *sectok = c_parser_peek_2nd_token (parser);
          if (sectok->type == CPP_COMMA)
            {
              c_parser_consume_token (parser);
              c_parser_consume_token (parser);
              always = 2;
            }
          else if (sectok->type == CPP_NAME)
            {
              p = IDENTIFIER_POINTER (sectok->value);
              if (strcmp ("alloc", p) == 0
                  || strcmp ("to", p) == 0
                  || strcmp ("from", p) == 0
                  || strcmp ("tofrom", p) == 0
                  || strcmp ("release", p) == 0
                  || strcmp ("delete", p) == 0)
                {
                  c_parser_consume_token (parser);
                  always = 1;
                }
            }
        }
    }

  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("alloc", p) == 0)
        kind = GOMP_MAP_ALLOC;
      else if (strcmp ("to", p) == 0)
        kind = always ? GOMP_MAP_ALWAYS_TO : GOMP_MAP_TO;
      else if (strcmp ("from", p) == 0)
        kind = always ? GOMP_MAP_ALWAYS_FROM : GOMP_MAP_FROM;
      else if (strcmp ("tofrom", p) == 0)
        kind = always ? GOMP_MAP_ALWAYS_TOFROM : GOMP_MAP_TOFROM;
      else if (strcmp ("release", p) == 0)
        kind = GOMP_MAP_RELEASE;
      else if (strcmp ("delete", p) == 0)
        kind = GOMP_MAP_DELETE;
      else
        {
          c_parser_error (parser, "invalid map kind");
          c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                     "expected %<)%>");
          return list;
        }
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else if (always)
    {
      if (always_id_kind != C_ID_ID)
        {
          c_parser_error (parser, "expected identifier");
          parens.skip_until_found_close (parser);
          return list;
        }

      tree t = lookup_name (always_id);
      if (t == NULL_TREE)
        {
          undeclared_variable (always_loc, always_id);
          t = error_mark_node;
        }
      if (t != error_mark_node)
        {
          tree u = build_omp_clause (clause_loc, OMP_CLAUSE_MAP);
          OMP_CLAUSE_DECL (u) = t;
          OMP_CLAUSE_CHAIN (u) = list;
          OMP_CLAUSE_SET_MAP_KIND (u, kind);
          list = u;
        }
      if (always == 1)
        {
          parens.skip_until_found_close (parser);
          return list;
        }
    }

  nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_MAP, list);

  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, kind);

  parens.skip_until_found_close (parser);
  return nl;
}

/* OpenMP 4.0:
   device ( expression ) */

static tree
c_parser_omp_clause_device (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);

      parens.skip_until_found_close (parser);

      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
        {
          c_parser_error (parser, "expected integer expression");
          return list;
        }

      check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE, "device");

      c = build_omp_clause (clause_loc, OMP_CLAUSE_DEVICE);
      OMP_CLAUSE_DEVICE_ID (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }

  return list;
}
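/* Illustrative usage (example added for exposition, not part of the
   original sources): map kinds, the OpenMP 4.5 "always" modifier, and
   the device clause parsed above:

     #pragma omp target device(1) map(to: a[0:n]) map(always, tofrom: s)
     { ... }
*/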
"expected %<)%>"); return list; } c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); location_t expr_loc = c_parser_peek_token (parser)->location; c_expr expr = c_parser_expr_no_commas (parser, NULL); expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true); t = expr.value; t = c_fully_fold (t, false, NULL); parens.skip_until_found_close (parser); } else c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); if (t == error_mark_node) return list; c = build_omp_clause (loc, OMP_CLAUSE_DIST_SCHEDULE); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: proc_bind ( proc-bind-kind ) proc-bind-kind: master | close | spread */ static tree c_parser_omp_clause_proc_bind (c_parser *parser, tree list) { location_t clause_loc = c_parser_peek_token (parser)->location; enum omp_clause_proc_bind_kind kind; tree c; matching_parens parens; if (!parens.require_open (parser)) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp ("master", p) == 0) kind = OMP_CLAUSE_PROC_BIND_MASTER; else if (strcmp ("close", p) == 0) kind = OMP_CLAUSE_PROC_BIND_CLOSE; else if (strcmp ("spread", p) == 0) kind = OMP_CLAUSE_PROC_BIND_SPREAD; else goto invalid_kind; } else goto invalid_kind; c_parser_consume_token (parser); parens.skip_until_found_close (parser); c = build_omp_clause (clause_loc, OMP_CLAUSE_PROC_BIND); OMP_CLAUSE_PROC_BIND_KIND (c) = kind; OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: c_parser_error (parser, "invalid proc_bind kind"); parens.skip_until_found_close (parser); return list; } /* OpenMP 4.0: to ( variable-list ) */ static tree c_parser_omp_clause_to (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO, list); } /* OpenMP 4.0: from ( variable-list ) */ static tree c_parser_omp_clause_from (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FROM, list); } /* OpenMP 4.0: uniform ( variable-list ) */ static tree c_parser_omp_clause_uniform (c_parser *parser, tree list) { /* The clauses location. */ location_t loc = c_parser_peek_token (parser)->location; matching_parens parens; if (parens.require_open (parser)) { list = c_parser_omp_variable_list (parser, loc, OMP_CLAUSE_UNIFORM, list); parens.skip_until_found_close (parser); } return list; } /* Parse all OpenACC clauses. The set clauses allowed by the directive is a bitmask in MASK. Return the list of clauses found. 
/* Parse all OpenACC clauses.  The set of clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.  */

static tree
c_parser_oacc_all_clauses (c_parser *parser, omp_clause_mask mask,
                           const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      tree prev = clauses;

      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      switch (c_kind)
        {
        case PRAGMA_OACC_CLAUSE_ASYNC:
          clauses = c_parser_oacc_clause_async (parser, clauses);
          c_name = "async";
          break;
        case PRAGMA_OACC_CLAUSE_AUTO:
          clauses = c_parser_oacc_simple_clause (parser, OMP_CLAUSE_AUTO,
                                                 clauses);
          c_name = "auto";
          break;
        case PRAGMA_OACC_CLAUSE_COLLAPSE:
          clauses = c_parser_omp_clause_collapse (parser, clauses);
          c_name = "collapse";
          break;
        case PRAGMA_OACC_CLAUSE_COPY:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copy";
          break;
        case PRAGMA_OACC_CLAUSE_COPYIN:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OACC_CLAUSE_COPYOUT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "copyout";
          break;
        case PRAGMA_OACC_CLAUSE_CREATE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "create";
          break;
        case PRAGMA_OACC_CLAUSE_DELETE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "delete";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = c_parser_omp_clause_default (parser, clauses, true);
          c_name = "default";
          break;
        case PRAGMA_OACC_CLAUSE_DEVICE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "device";
          break;
        case PRAGMA_OACC_CLAUSE_DEVICEPTR:
          clauses = c_parser_oacc_data_clause_deviceptr (parser, clauses);
          c_name = "deviceptr";
          break;
        case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "device_resident";
          break;
        case PRAGMA_OACC_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OACC_CLAUSE_GANG:
          c_name = "gang";
          clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_GANG,
                                                c_name, clauses);
          break;
        case PRAGMA_OACC_CLAUSE_HOST:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "host";
          break;
        case PRAGMA_OACC_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses, false);
          c_name = "if";
          break;
        case PRAGMA_OACC_CLAUSE_INDEPENDENT:
          clauses = c_parser_oacc_simple_clause (parser,
                                                 OMP_CLAUSE_INDEPENDENT,
                                                 clauses);
          c_name = "independent";
          break;
        case PRAGMA_OACC_CLAUSE_LINK:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "link";
          break;
        case PRAGMA_OACC_CLAUSE_NUM_GANGS:
          clauses = c_parser_oacc_single_int_clause (parser,
                                                     OMP_CLAUSE_NUM_GANGS,
                                                     clauses);
          c_name = "num_gangs";
          break;
        case PRAGMA_OACC_CLAUSE_NUM_WORKERS:
          clauses = c_parser_oacc_single_int_clause (parser,
                                                     OMP_CLAUSE_NUM_WORKERS,
                                                     clauses);
          c_name = "num_workers";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copy";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copyin";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_copyout";
          break;
        case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "present_or_create";
          break;
        case PRAGMA_OACC_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OACC_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OACC_CLAUSE_SELF:
          clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
          c_name = "self";
          break;
        case PRAGMA_OACC_CLAUSE_SEQ:
          clauses = c_parser_oacc_simple_clause (parser, OMP_CLAUSE_SEQ,
                                                 clauses);
          c_name = "seq";
          break;
        case PRAGMA_OACC_CLAUSE_TILE:
          clauses = c_parser_oacc_clause_tile (parser, clauses);
          c_name = "tile";
          break;
        case PRAGMA_OACC_CLAUSE_USE_DEVICE:
          clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
          c_name = "use_device";
          break;
        case PRAGMA_OACC_CLAUSE_VECTOR:
          c_name = "vector";
          clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_VECTOR,
                                                c_name, clauses);
          break;
        case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH:
          clauses = c_parser_oacc_single_int_clause (parser,
                                                     OMP_CLAUSE_VECTOR_LENGTH,
                                                     clauses);
          c_name = "vector_length";
          break;
        case PRAGMA_OACC_CLAUSE_WAIT:
          clauses = c_parser_oacc_clause_wait (parser, clauses);
          c_name = "wait";
          break;
        case PRAGMA_OACC_CLAUSE_WORKER:
          c_name = "worker";
          clauses = c_parser_oacc_shape_clause (parser, OMP_CLAUSE_WORKER,
                                                c_name, clauses);
          break;
        default:
          c_parser_error (parser, "expected %<#pragma acc%> clause");
          goto saw_error;
        }

      first = false;

      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error_at (here, "%qs is not valid for %qs", c_name, where);
        }
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    return c_finish_omp_clauses (clauses, C_ORT_ACC);

  return clauses;
}
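/* Illustrative usage (example added for exposition, not part of the
   original sources): a compute construct carrying several of the
   clauses dispatched above:

     #pragma acc parallel async(1) num_gangs(32) vector_length(128) \
                 copyin(a[0:n]) copyout(b[0:n]) reduction(+:sum)
     { ... }
*/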
/* Parse all OpenMP clauses.  The set of clauses allowed by the directive
   is a bitmask in MASK.  Return the list of clauses found.  */

static tree
c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
                          const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;

  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      tree prev = clauses;

      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
        c_parser_consume_token (parser);

      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);

      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COLLAPSE:
          clauses = c_parser_omp_clause_collapse (parser, clauses);
          c_name = "collapse";
          break;
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = c_parser_omp_clause_copyin (parser, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = c_parser_omp_clause_copyprivate (parser, clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = c_parser_omp_clause_default (parser, clauses, false);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = c_parser_omp_clause_firstprivate (parser, clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_FINAL:
          clauses = c_parser_omp_clause_final (parser, clauses);
          c_name = "final";
          break;
        case PRAGMA_OMP_CLAUSE_GRAINSIZE:
          clauses = c_parser_omp_clause_grainsize (parser, clauses);
          c_name = "grainsize";
          break;
        case PRAGMA_OMP_CLAUSE_HINT:
          clauses = c_parser_omp_clause_hint (parser, clauses);
          c_name = "hint";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULTMAP:
          clauses = c_parser_omp_clause_defaultmap (parser, clauses);
          c_name = "defaultmap";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = c_parser_omp_clause_if (parser, clauses, true);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = c_parser_omp_clause_lastprivate (parser, clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_MERGEABLE:
          clauses = c_parser_omp_clause_mergeable (parser, clauses);
          c_name = "mergeable";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = c_parser_omp_clause_nowait (parser, clauses);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_TASKS:
          clauses = c_parser_omp_clause_num_tasks (parser, clauses);
          c_name = "num_tasks";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = c_parser_omp_clause_num_threads (parser, clauses);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = c_parser_omp_clause_ordered (parser, clauses);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIORITY:
          clauses = c_parser_omp_clause_priority (parser, clauses);
          c_name = "priority";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = c_parser_omp_clause_private (parser, clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = c_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = c_parser_omp_clause_schedule (parser, clauses);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = c_parser_omp_clause_shared (parser, clauses);
          c_name = "shared";
          break;
        case PRAGMA_OMP_CLAUSE_UNTIED:
          clauses = c_parser_omp_clause_untied (parser, clauses);
          c_name = "untied";
          break;
        case PRAGMA_OMP_CLAUSE_INBRANCH:
          clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH,
                                                clauses);
          c_name = "inbranch";
          break;
        case PRAGMA_OMP_CLAUSE_NOTINBRANCH:
          clauses = c_parser_omp_clause_branch (parser,
                                                OMP_CLAUSE_NOTINBRANCH,
                                                clauses);
          c_name = "notinbranch";
          break;
        case PRAGMA_OMP_CLAUSE_PARALLEL:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL,
                                              clauses);
          c_name = "parallel";
          if (!first)
            {
             clause_not_first:
              error_at (here, "%qs must be the first clause of %qs",
                        c_name, where);
              clauses = prev;
            }
          break;
        case PRAGMA_OMP_CLAUSE_FOR:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR,
                                              clauses);
          c_name = "for";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_SECTIONS:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS,
                                              clauses);
          c_name = "sections";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_TASKGROUP:
          clauses
            = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP,
                                              clauses);
          c_name = "taskgroup";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_LINK:
          clauses
            = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LINK, clauses);
          c_name = "link";
          break;
        case PRAGMA_OMP_CLAUSE_TO:
          if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK)) != 0)
            clauses
              = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE,
                                              clauses);
          else
            clauses = c_parser_omp_clause_to (parser, clauses);
          c_name = "to";
          break;
        case PRAGMA_OMP_CLAUSE_FROM:
          clauses = c_parser_omp_clause_from (parser, clauses);
          c_name = "from";
          break;
        case PRAGMA_OMP_CLAUSE_UNIFORM:
          clauses = c_parser_omp_clause_uniform (parser, clauses);
          c_name = "uniform";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_TEAMS:
          clauses = c_parser_omp_clause_num_teams (parser, clauses);
          c_name = "num_teams";
          break;
        case PRAGMA_OMP_CLAUSE_THREAD_LIMIT:
          clauses = c_parser_omp_clause_thread_limit (parser, clauses);
          c_name = "thread_limit";
          break;
        case PRAGMA_OMP_CLAUSE_ALIGNED:
          clauses = c_parser_omp_clause_aligned (parser, clauses);
          c_name = "aligned";
          break;
        case PRAGMA_OMP_CLAUSE_LINEAR:
          clauses = c_parser_omp_clause_linear (parser, clauses);
          c_name = "linear";
          break;
        case PRAGMA_OMP_CLAUSE_DEPEND:
          clauses = c_parser_omp_clause_depend (parser, clauses);
          c_name = "depend";
          break;
        case PRAGMA_OMP_CLAUSE_MAP:
          clauses = c_parser_omp_clause_map (parser, clauses);
          c_name = "map";
          break;
        case PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR:
          clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
          c_name = "use_device_ptr";
          break;
        case PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR:
          clauses = c_parser_omp_clause_is_device_ptr (parser, clauses);
          c_name = "is_device_ptr";
          break;
        case PRAGMA_OMP_CLAUSE_DEVICE:
          clauses = c_parser_omp_clause_device (parser, clauses);
          c_name = "device";
          break;
        case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE:
          clauses = c_parser_omp_clause_dist_schedule (parser, clauses);
          c_name = "dist_schedule";
          break;
        case PRAGMA_OMP_CLAUSE_PROC_BIND:
          clauses = c_parser_omp_clause_proc_bind (parser, clauses);
          c_name = "proc_bind";
          break;
        case PRAGMA_OMP_CLAUSE_SAFELEN:
          clauses = c_parser_omp_clause_safelen (parser, clauses);
          c_name = "safelen";
          break;
        case PRAGMA_OMP_CLAUSE_SIMDLEN:
          clauses = c_parser_omp_clause_simdlen (parser, clauses);
          c_name = "simdlen";
          break;
        case PRAGMA_OMP_CLAUSE_NOGROUP:
          clauses = c_parser_omp_clause_nogroup (parser, clauses);
          c_name = "nogroup";
          break;
        case PRAGMA_OMP_CLAUSE_THREADS:
          clauses
            = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_THREADS,
                                               clauses);
          c_name = "threads";
          break;
        case PRAGMA_OMP_CLAUSE_SIMD:
          clauses
            = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_SIMD,
                                               clauses);
          c_name = "simd";
          break;
        default:
          c_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }

      first = false;

      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error_at (here, "%qs is not valid for %qs", c_name, where);
        }
    }

 saw_error:
  c_parser_skip_to_pragma_eol (parser);

  if (finish_p)
    {
      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM)) != 0)
        return c_finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      return c_finish_omp_clauses (clauses, C_ORT_OMP);
    }

  return clauses;
}

/* OpenACC 2.0, OpenMP 2.5:

   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   c_parser_statement calls add_stmt.  */

static tree
c_parser_omp_structured_block (c_parser *parser, bool *if_p)
{
  tree stmt = push_stmt_list ();
  c_parser_statement (parser, if_p);
  return pop_stmt_list (stmt);
}

/* OpenACC 2.0:
   # pragma acc cache (variable-list) new-line

   LOC is the location of the #pragma token.  */

static tree
c_parser_oacc_cache (location_t loc, c_parser *parser)
{
  tree stmt, clauses;

  clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE__CACHE_, NULL);
  clauses = c_finish_omp_clauses (clauses, C_ORT_ACC);

  c_parser_skip_to_pragma_eol (parser);

  stmt = make_node (OACC_CACHE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_CACHE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);

  return stmt;
}

/* OpenACC 2.0:
   # pragma acc data oacc-data-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.  */

#define OACC_DATA_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) )

static tree
c_parser_oacc_data (location_t loc, c_parser *parser, bool *if_p)
{
  tree stmt, clauses, block;

  clauses = c_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK,
                                       "#pragma acc data");

  block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));

  stmt = c_finish_oacc_data (loc, clauses, block);

  return stmt;
}
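/* Illustrative usage (example added for exposition, not part of the
   original sources):

     #pragma acc data copyin(a[0:n]) create(tmp[0:n]) copyout(b[0:n])
     { ... }
*/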
/* OpenACC 2.0:
   # pragma acc declare oacc-data-clause[optseq] new-line
*/

#define OACC_DECLARE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_LINK)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) )

static void
c_parser_oacc_declare (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt, t, decl;

  bool error = false;

  c_parser_consume_pragma (parser);

  clauses = c_parser_oacc_all_clauses (parser, OACC_DECLARE_CLAUSE_MASK,
                                       "#pragma acc declare");
  if (!clauses)
    {
      error_at (pragma_loc,
                "no valid clauses specified in %<#pragma acc declare%>");
      return;
    }

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      location_t loc = OMP_CLAUSE_LOCATION (t);
      decl = OMP_CLAUSE_DECL (t);
      if (!DECL_P (decl))
        {
          error_at (loc, "array section in %<#pragma acc declare%>");
          error = true;
          continue;
        }

      switch (OMP_CLAUSE_MAP_KIND (t))
        {
        case GOMP_MAP_FIRSTPRIVATE_POINTER:
        case GOMP_MAP_FORCE_ALLOC:
        case GOMP_MAP_FORCE_TO:
        case GOMP_MAP_FORCE_DEVICEPTR:
        case GOMP_MAP_DEVICE_RESIDENT:
          break;

        case GOMP_MAP_LINK:
          if (!global_bindings_p ()
              && (TREE_STATIC (decl)
                  || !DECL_EXTERNAL (decl)))
            {
              error_at (loc,
                        "%qD must be a global variable in "
                        "%<#pragma acc declare link%>",
                        decl);
              error = true;
              continue;
            }
          break;

        default:
          if (global_bindings_p ())
            {
              error_at (loc, "invalid OpenACC clause at file scope");
              error = true;
              continue;
            }
          if (DECL_EXTERNAL (decl))
            {
              error_at (loc,
                        "invalid use of %<extern%> variable %qD "
                        "in %<#pragma acc declare%>", decl);
              error = true;
              continue;
            }
          else if (TREE_PUBLIC (decl))
            {
              error_at (loc,
                        "invalid use of %<global%> variable %qD "
                        "in %<#pragma acc declare%>", decl);
              error = true;
              continue;
            }
          break;
        }

      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (decl))
          || lookup_attribute ("omp declare target link",
                               DECL_ATTRIBUTES (decl)))
        {
          error_at (loc, "variable %qD used more than once with "
                         "%<#pragma acc declare%>", decl);
          error = true;
          continue;
        }

      if (!error)
        {
          tree id;

          if (OMP_CLAUSE_MAP_KIND (t) == GOMP_MAP_LINK)
            id = get_identifier ("omp declare target link");
          else
            id = get_identifier ("omp declare target");

          DECL_ATTRIBUTES (decl)
            = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (decl));

          if (global_bindings_p ())
            {
              symtab_node *node = symtab_node::get (decl);
              if (node != NULL)
                {
                  node->offloadable = 1;
                  if (ENABLE_OFFLOADING)
                    {
                      g->have_offload = true;
                      if (is_a <varpool_node *> (node))
                        vec_safe_push (offload_vars, decl);
                    }
                }
            }
        }
    }

  if (error || global_bindings_p ())
    return;

  stmt = make_node (OACC_DECLARE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_DECLARE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_loc);

  add_stmt (stmt);

  return;
}
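/* Illustrative usage (example added for exposition, not part of the
   original sources): at file scope, a create clause marks a static
   variable as device-resident via the attribute machinery above:

     static float coeff[1024];
     #pragma acc declare create(coeff)
*/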
"enter" : "exit"); parser->error = true; c_parser_skip_to_pragma_eol (parser); return; } if (enter) clauses = c_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK, "#pragma acc enter data"); else clauses = c_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK, "#pragma acc exit data"); if (omp_find_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE) { error_at (loc, "%<#pragma acc %s data%> has no data movement clause", enter ? "enter" : "exit"); return; } stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA); TREE_TYPE (stmt) = void_type_node; OMP_STANDALONE_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); } /* OpenACC 2.0: # pragma acc host_data oacc-data-clause[optseq] new-line structured-block */ #define OACC_HOST_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_USE_DEVICE) ) static tree c_parser_oacc_host_data (location_t loc, c_parser *parser, bool *if_p) { tree stmt, clauses, block; clauses = c_parser_oacc_all_clauses (parser, OACC_HOST_DATA_CLAUSE_MASK, "#pragma acc host_data"); block = c_begin_omp_parallel (); add_stmt (c_parser_omp_structured_block (parser, if_p)); stmt = c_finish_oacc_host_data (loc, clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc loop oacc-loop-clause[optseq] new-line structured-block LOC is the location of the #pragma token. */ #define OACC_LOOP_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_AUTO) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_INDEPENDENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_TILE) ) static tree c_parser_oacc_loop (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { bool is_parallel = ((mask >> PRAGMA_OACC_CLAUSE_REDUCTION) & 1) == 1; strcat (p_name, " loop"); mask |= OACC_LOOP_CLAUSE_MASK; tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { clauses = c_oacc_split_loop_clauses (clauses, cclauses, is_parallel); if (*cclauses) *cclauses = c_finish_omp_clauses (*cclauses, C_ORT_ACC); if (clauses) clauses = c_finish_omp_clauses (clauses, C_ORT_ACC); } tree block = c_begin_compound_stmt (true); tree stmt = c_parser_omp_for_loop (loc, parser, OACC_LOOP, clauses, NULL, if_p); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return stmt; } /* OpenACC 2.0: # pragma acc kernels oacc-kernels-clause[optseq] new-line structured-block or # pragma acc parallel oacc-parallel-clause[optseq] new-line structured-block LOC is the location of the #pragma token. 
/* OpenACC 2.0:
   # pragma acc kernels oacc-kernels-clause[optseq] new-line
     structured-block

   or

   # pragma acc parallel oacc-parallel-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma token.  */

#define OACC_KERNELS_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

#define OACC_PARALLEL_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

static tree
c_parser_oacc_kernels_parallel (location_t loc, c_parser *parser,
                                enum pragma_kind p_kind, char *p_name,
                                bool *if_p)
{
  omp_clause_mask mask;
  enum tree_code code;
  switch (p_kind)
    {
    case PRAGMA_OACC_KERNELS:
      strcat (p_name, " kernels");
      mask = OACC_KERNELS_CLAUSE_MASK;
      code = OACC_KERNELS;
      break;
    case PRAGMA_OACC_PARALLEL:
      strcat (p_name, " parallel");
      mask = OACC_PARALLEL_CLAUSE_MASK;
      code = OACC_PARALLEL;
      break;
    default:
      gcc_unreachable ();
    }

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "loop") == 0)
        {
          c_parser_consume_token (parser);
          tree block = c_begin_omp_parallel ();
          tree clauses;
          c_parser_oacc_loop (loc, parser, p_name, mask, &clauses, if_p);
          return c_finish_omp_construct (loc, code, block, clauses);
        }
    }

  tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name);

  tree block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));

  return c_finish_omp_construct (loc, code, block, clauses);
}

/* OpenACC 2.0:
   # pragma acc routine oacc-routine-clause[optseq] new-line
     function-definition

   # pragma acc routine ( name ) oacc-routine-clause[optseq] new-line
*/

#define OACC_ROUTINE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ) )

/* Parse an OpenACC routine directive.  For named directives, we apply
   immediately to the named function.  For unnamed ones we then parse
   a declaration or definition, which must be for a function.  */

static void
c_parser_oacc_routine (c_parser *parser, enum pragma_context context)
{
  gcc_checking_assert (context == pragma_external);

  oacc_routine_data data;
  data.error_seen = false;
  data.fndecl_seen = false;
  data.clauses = NULL_TREE;
  data.loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);

  /* Look for optional '( name )'.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser); /* '(' */

      tree decl = NULL_TREE;
      c_token *name_token = c_parser_peek_token (parser);
      location_t name_loc = name_token->location;
      if (name_token->type == CPP_NAME
          && (name_token->id_kind == C_ID_ID
              || name_token->id_kind == C_ID_TYPENAME))
        {
          decl = lookup_name (name_token->value);
          if (!decl)
            error_at (name_loc,
                      "%qE has not been declared", name_token->value);
          c_parser_consume_token (parser);
        }
      else
        c_parser_error (parser, "expected function name");

      if (!decl
          || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
        {
          c_parser_skip_to_pragma_eol (parser, false);
          return;
        }

      data.clauses
        = c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
                                     "#pragma acc routine");

      if (TREE_CODE (decl) != FUNCTION_DECL)
        {
          error_at (name_loc, "%qD does not refer to a function", decl);
          return;
        }

      c_finish_oacc_routine (&data, decl, false);
    }
  else /* No optional '( name )'.  */
    {
      data.clauses
        = c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
                                     "#pragma acc routine");

      /* Emit a helpful diagnostic if there's another pragma following this
         one.  Also don't allow a static assertion declaration, as in the
         following we'll just parse a *single* "declaration or function
         definition", and the static assertion counts as one.  */
      if (c_parser_next_token_is (parser, CPP_PRAGMA)
          || c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
        {
          error_at (data.loc,
                    "%<#pragma acc routine%> not immediately followed by"
                    " function declaration or definition");
          /* ..., and then just keep going.  */
          return;
        }

      /* We only have to consider the pragma_external case here.  */
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
          && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
        {
          int ext = disable_extension_diagnostics ();
          do
            c_parser_consume_token (parser);
          while (c_parser_next_token_is (parser, CPP_KEYWORD)
                 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
          c_parser_declaration_or_fndef (parser, true, true, true, false, true,
                                         NULL, vNULL, &data);
          restore_extension_diagnostics (ext);
        }
      else
        c_parser_declaration_or_fndef (parser, true, true, true, false, true,
                                       NULL, vNULL, &data);
    }
}
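/* Illustrative usage (examples added for exposition, not part of the
   original sources): both the unnamed and named forms handled above:

     #pragma acc routine seq
     extern float norm (const float *v, int n);

     #pragma acc routine (saxpy) gang
*/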
/* Finalize an OpenACC routine pragma, applying it to FNDECL.
   IS_DEFN is true if we're applying it to the definition.  */

static void
c_finish_oacc_routine (struct oacc_routine_data *data, tree fndecl,
                       bool is_defn)
{
  /* Keep going if we're in error reporting mode.  */
  if (data->error_seen
      || fndecl == error_mark_node)
    return;

  if (data->fndecl_seen)
    {
      error_at (data->loc,
                "%<#pragma acc routine%> not immediately followed by"
                " a single function declaration or definition");
      data->error_seen = true;
      return;
    }
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    {
      error_at (data->loc,
                "%<#pragma acc routine%> not immediately followed by"
                " function declaration or definition");
      data->error_seen = true;
      return;
    }

  if (oacc_get_fn_attrib (fndecl))
    {
      error_at (data->loc,
                "%<#pragma acc routine%> already applied to %qD", fndecl);
      data->error_seen = true;
      return;
    }

  if (TREE_USED (fndecl) || (!is_defn && DECL_SAVED_TREE (fndecl)))
    {
      error_at (data->loc,
                TREE_USED (fndecl)
                ? G_("%<#pragma acc routine%> must be applied before use")
                : G_("%<#pragma acc routine%> must be applied before "
                     "definition"));
      data->error_seen = true;
      return;
    }

  /* Process the routine's dimension clauses.  */
  tree dims = oacc_build_routine_dims (data->clauses);
  oacc_replace_fn_attrib (fndecl, dims);

  /* Add an "omp declare target" attribute.  */
  DECL_ATTRIBUTES (fndecl)
    = tree_cons (get_identifier ("omp declare target"),
                 NULL_TREE, DECL_ATTRIBUTES (fndecl));

  /* Remember that we've used this "#pragma acc routine".  */
  data->fndecl_seen = true;
}

/* OpenACC 2.0:
   # pragma acc update oacc-update-clause[optseq] new-line
*/

#define OACC_UPDATE_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SELF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

static void
c_parser_oacc_update (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);

  tree clauses = c_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK,
                                            "#pragma acc update");
  if (omp_find_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (loc,
                "%<#pragma acc update%> must contain at least one "
                "%<device%> or %<host%> or %<self%> clause");
      return;
    }

  if (parser->error)
    return;

  tree stmt = make_node (OACC_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
}

/* OpenACC 2.0:
   # pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line

   LOC is the location of the #pragma token.  */

#define OACC_WAIT_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) )

static tree
c_parser_oacc_wait (location_t loc, c_parser *parser, char *p_name)
{
  tree clauses, list = NULL_TREE, stmt = NULL_TREE;

  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    list = c_parser_oacc_wait_list (parser, loc, list);

  strcpy (p_name, " wait");
  clauses = c_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK, p_name);
  stmt = c_finish_oacc_wait (loc, list, clauses);
  add_stmt (stmt);

  return stmt;
}
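/* Illustrative usage (examples added for exposition, not part of the
   original sources):

     #pragma acc update self(a[0:n]) async(2)
     #pragma acc wait(2)
*/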
/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt

   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>

  where x is an lvalue expression with scalar type.

   OpenMP 3.1:
   # pragma omp atomic new-line
     update-stmt

   # pragma omp atomic read new-line
     read-stmt

   # pragma omp atomic write new-line
     write-stmt

   # pragma omp atomic update new-line
     update-stmt

   # pragma omp atomic capture new-line
     capture-stmt

   # pragma omp atomic capture new-line
     capture-block

   read-stmt:
     v = x
   write-stmt:
     x = expr
   update-stmt:
     expression-stmt | x = x binop expr
   capture-stmt:
     v = expression-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; }

   OpenMP 4.0:
   update-stmt:
     expression-stmt | x = x binop expr | x = expr binop x
   capture-stmt:
     v = update-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; }

  where x and v are lvalue expressions with scalar type.

  LOC is the location of the #pragma token.  */
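/* Illustrative usage (examples added for exposition, not part of the
   original sources): two of the statement forms described by the
   grammar above:

     #pragma omp atomic update seq_cst
     x += delta;

     #pragma omp atomic capture
     { v = x; x = x + 1; }
*/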
*/ code = OMP_ATOMIC; rhs = lhs; lhs = v; v = NULL_TREE; } goto done; case OMP_ATOMIC_CAPTURE_NEW: if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_consume_token (parser); structured_block = true; } else { v = c_parser_cast_expression (parser, NULL).value; non_lvalue_p = !lvalue_p (v); v = c_fully_fold (v, false, NULL, true); if (v == error_mark_node) goto saw_error; if (non_lvalue_p) v = non_lvalue (v); if (!c_parser_require (parser, CPP_EQ, "expected %<=%>")) goto saw_error; } break; default: break; } /* For structured_block case we don't know yet whether old or new x should be captured. */ restart: eloc = c_parser_peek_token (parser)->location; expr = c_parser_cast_expression (parser, NULL); lhs = expr.value; expr = default_function_array_conversion (eloc, expr); unfolded_lhs = expr.value; lhs = c_fully_fold (lhs, false, NULL, true); orig_lhs = lhs; switch (TREE_CODE (lhs)) { case ERROR_MARK: saw_error: c_parser_skip_to_end_of_block_or_statement (parser); if (structured_block) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) c_parser_consume_token (parser); else if (code == OMP_ATOMIC_CAPTURE_NEW) { c_parser_skip_to_end_of_block_or_statement (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) c_parser_consume_token (parser); } } return; case POSTINCREMENT_EXPR: if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block) code = OMP_ATOMIC_CAPTURE_OLD; /* FALLTHROUGH */ case PREINCREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); unfolded_lhs = NULL_TREE; opcode = PLUS_EXPR; rhs = integer_one_node; break; case POSTDECREMENT_EXPR: if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block) code = OMP_ATOMIC_CAPTURE_OLD; /* FALLTHROUGH */ case PREDECREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); unfolded_lhs = NULL_TREE; opcode = MINUS_EXPR; rhs = integer_one_node; break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0) && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0), 0))) == BOOLEAN_TYPE) /* Undo effects of boolean_increment for post {in,de}crement. */ lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_CODE (lhs) == MODIFY_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE) { /* Undo effects of boolean_increment. */ if (integer_onep (TREE_OPERAND (lhs, 1))) { /* This is pre or post increment. */ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); unfolded_lhs = NULL_TREE; opcode = NOP_EXPR; if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block && TREE_CODE (orig_lhs) == COMPOUND_EXPR) code = OMP_ATOMIC_CAPTURE_OLD; break; } if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR && TREE_OPERAND (lhs, 0) == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) { /* This is pre or post decrement. 
*/ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); unfolded_lhs = NULL_TREE; opcode = NOP_EXPR; if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block && TREE_CODE (orig_lhs) == COMPOUND_EXPR) code = OMP_ATOMIC_CAPTURE_OLD; break; } } /* FALLTHRU */ default: if (!lvalue_p (unfolded_lhs)) lhs = non_lvalue (lhs); switch (c_parser_peek_token (parser)->type) { case CPP_MULT_EQ: opcode = MULT_EXPR; break; case CPP_DIV_EQ: opcode = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: opcode = PLUS_EXPR; break; case CPP_MINUS_EQ: opcode = MINUS_EXPR; break; case CPP_LSHIFT_EQ: opcode = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: opcode = RSHIFT_EXPR; break; case CPP_AND_EQ: opcode = BIT_AND_EXPR; break; case CPP_OR_EQ: opcode = BIT_IOR_EXPR; break; case CPP_XOR_EQ: opcode = BIT_XOR_EXPR; break; case CPP_EQ: c_parser_consume_token (parser); eloc = c_parser_peek_token (parser)->location; expr = c_parser_expr_no_commas (parser, NULL, unfolded_lhs); rhs1 = expr.value; switch (TREE_CODE (rhs1)) { case MULT_EXPR: case TRUNC_DIV_EXPR: case RDIV_EXPR: case PLUS_EXPR: case MINUS_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (c_tree_equal (TREE_OPERAND (rhs1, 0), unfolded_lhs)) { opcode = TREE_CODE (rhs1); rhs = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL, true); rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL, true); goto stmt_done; } if (c_tree_equal (TREE_OPERAND (rhs1, 1), unfolded_lhs)) { opcode = TREE_CODE (rhs1); rhs = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL, true); rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL, true); swapped = !commutative_tree_code (opcode); goto stmt_done; } break; case ERROR_MARK: goto saw_error; default: break; } if (c_parser_peek_token (parser)->type == CPP_SEMICOLON) { if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW) { code = OMP_ATOMIC_CAPTURE_OLD; v = lhs; lhs = NULL_TREE; expr = default_function_array_read_conversion (eloc, expr); unfolded_lhs1 = expr.value; lhs1 = c_fully_fold (unfolded_lhs1, false, NULL, true); rhs1 = NULL_TREE; c_parser_consume_token (parser); goto restart; } if (structured_block) { opcode = NOP_EXPR; expr = default_function_array_read_conversion (eloc, expr); rhs = c_fully_fold (expr.value, false, NULL, true); rhs1 = NULL_TREE; goto stmt_done; } } c_parser_error (parser, "invalid form of %<#pragma omp atomic%>"); goto saw_error; default: c_parser_error (parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } /* Arrange to pass the location of the assignment operator to c_finish_omp_atomic. 
*/ loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); eloc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); expr = default_function_array_read_conversion (eloc, expr); rhs = expr.value; rhs = c_fully_fold (rhs, false, NULL, true); break; } stmt_done: if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW) { if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) goto saw_error; v = c_parser_cast_expression (parser, NULL).value; non_lvalue_p = !lvalue_p (v); v = c_fully_fold (v, false, NULL, true); if (v == error_mark_node) goto saw_error; if (non_lvalue_p) v = non_lvalue (v); if (!c_parser_require (parser, CPP_EQ, "expected %<=%>")) goto saw_error; eloc = c_parser_peek_token (parser)->location; expr = c_parser_cast_expression (parser, NULL); lhs1 = expr.value; expr = default_function_array_read_conversion (eloc, expr); unfolded_lhs1 = expr.value; lhs1 = c_fully_fold (lhs1, false, NULL, true); if (lhs1 == error_mark_node) goto saw_error; if (!lvalue_p (unfolded_lhs1)) lhs1 = non_lvalue (lhs1); } if (structured_block) { c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); c_parser_require (parser, CPP_CLOSE_BRACE, "expected %<}%>"); } done: if (unfolded_lhs && unfolded_lhs1 && !c_tree_equal (unfolded_lhs, unfolded_lhs1)) { error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); stmt = error_mark_node; } else stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1, rhs1, swapped, seq_cst); if (stmt != error_mark_node) add_stmt (stmt); if (!structured_block) c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* OpenMP 2.5: # pragma omp barrier new-line */ static void c_parser_omp_barrier (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_barrier (loc); } /* OpenMP 2.5: # pragma omp critical [(name)] new-line structured-block OpenMP 4.5: # pragma omp critical [(name) [hint(expression)]] new-line LOC is the location of the #pragma itself. 
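   An illustrative use (the lock name and variables are placeholders,
   not taken from this file):

     #pragma omp critical (update_lock)
       shared_total += local_total;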
*/ #define OMP_CRITICAL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_HINT) ) static tree c_parser_omp_critical (location_t loc, c_parser *parser, bool *if_p) { tree stmt, name = NULL_TREE, clauses = NULL_TREE; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_error (parser, "expected identifier"); clauses = c_parser_omp_all_clauses (parser, OMP_CRITICAL_CLAUSE_MASK, "#pragma omp critical"); } else { if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); } stmt = c_parser_omp_structured_block (parser, if_p); return c_finish_omp_critical (loc, stmt, name, clauses); } /* OpenMP 2.5: # pragma omp flush flush-vars[opt] new-line flush-vars: ( variable-list ) */ static void c_parser_omp_flush (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); c_finish_omp_flush (loc); } /* Parse the restricted form of loop statements allowed by OpenACC and OpenMP. The real trick here is to determine the loop control variable early so that we can push a new decl if necessary to make it private. LOC is the location of the "acc" or "omp" in "#pragma acc" or "#pragma omp", respectively. */ static tree c_parser_omp_for_loop (location_t loc, c_parser *parser, enum tree_code code, tree clauses, tree *cclauses, bool *if_p) { tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl; tree declv, condv, incrv, initv, ret = NULL_TREE; tree pre_body = NULL_TREE, this_pre_body; tree ordered_cl = NULL_TREE; bool fail = false, open_brace_parsed = false; int i, collapse = 1, ordered = 0, count, nbraces = 0; location_t for_loc; bool tiling = false; vec<tree, va_gc> *for_block = make_tree_vector (); for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_TILE) { tiling = true; collapse = list_length (OMP_CLAUSE_TILE_LIST (cl)); } else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_ORDERED && OMP_CLAUSE_ORDERED_EXPR (cl)) { ordered_cl = cl; ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (cl)); } if (ordered && ordered < collapse) { error_at (OMP_CLAUSE_LOCATION (ordered_cl), "%<ordered%> clause parameter is less than %<collapse%>"); OMP_CLAUSE_ORDERED_EXPR (ordered_cl) = build_int_cst (NULL_TREE, collapse); ordered = collapse; } if (ordered) { for (tree *pc = &clauses; *pc; ) if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LINEAR) { error_at (OMP_CLAUSE_LOCATION (*pc), "%<linear%> clause may not be specified together " "with %<ordered%> clause with a parameter"); *pc = OMP_CLAUSE_CHAIN (*pc); } else pc = &OMP_CLAUSE_CHAIN (*pc); } gcc_assert (tiling || (collapse >= 1 && ordered >= 0)); count = ordered ? 
ordered : collapse; declv = make_tree_vec (count); initv = make_tree_vec (count); condv = make_tree_vec (count); incrv = make_tree_vec (count); if (!c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_error (parser, "for statement expected"); return NULL; } for_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); for (i = 0; i < count; i++) { int bracecount = 0; matching_parens parens; if (!parens.require_open (parser)) goto pop_scopes; /* Parse the initialization declaration or expression. */ if (c_parser_next_tokens_start_declaration (parser)) { if (i > 0) vec_safe_push (for_block, c_begin_compound_stmt (true)); this_pre_body = push_stmt_list (); c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, vNULL); if (this_pre_body) { this_pre_body = pop_stmt_list (this_pre_body); if (pre_body) { tree t = pre_body; pre_body = push_stmt_list (); add_stmt (t); add_stmt (this_pre_body); pre_body = pop_stmt_list (pre_body); } else pre_body = this_pre_body; } decl = check_for_loop_decls (for_loc, flag_isoc99); if (decl == NULL) goto error_init; if (DECL_INITIAL (decl) == error_mark_node) decl = error_mark_node; init = decl; } else if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_EQ) { struct c_expr decl_exp; struct c_expr init_exp; location_t init_loc; decl_exp = c_parser_postfix_expression (parser); decl = decl_exp.value; c_parser_require (parser, CPP_EQ, "expected %<=%>"); init_loc = c_parser_peek_token (parser)->location; init_exp = c_parser_expr_no_commas (parser, NULL); init_exp = default_function_array_read_conversion (init_loc, init_exp); init = build_modify_expr (init_loc, decl, decl_exp.original_type, NOP_EXPR, init_loc, init_exp.value, init_exp.original_type); init = c_process_expr_stmt (init_loc, init); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } else { error_init: c_parser_error (parser, "expected iteration declaration or initialization"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); fail = true; goto parse_next; } /* Parse the loop condition. */ cond = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_SEMICOLON)) { location_t cond_loc = c_parser_peek_token (parser)->location; struct c_expr cond_expr = c_parser_binary_expression (parser, NULL, NULL_TREE); cond = cond_expr.value; cond = c_objc_common_truthvalue_conversion (cond_loc, cond); if (COMPARISON_CLASS_P (cond)) { tree op0 = TREE_OPERAND (cond, 0), op1 = TREE_OPERAND (cond, 1); op0 = c_fully_fold (op0, false, NULL); op1 = c_fully_fold (op1, false, NULL); TREE_OPERAND (cond, 0) = op0; TREE_OPERAND (cond, 1) = op1; } switch (cond_expr.original_code) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: break; default: /* Can't be cond = error_mark_node, because we want to preserve the location until c_finish_omp_for. */ cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node); break; } protected_set_expr_location (cond, cond_loc); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); /* Parse the increment expression. 
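   In the canonical loop form this is e.g. i++, ++i, i += step or
   i = i + step (illustrative; detailed validation of the loop nest
   is left to c_finish_omp_for below).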
*/ incr = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN)) { location_t incr_loc = c_parser_peek_token (parser)->location; incr = c_process_expr_stmt (incr_loc, c_parser_expression (parser).value); } parens.skip_until_found_close (parser); if (decl == NULL || decl == error_mark_node || init == error_mark_node) fail = true; else { TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; } parse_next: if (i == count - 1) break; /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed in between the collapsed for loops to be still considered perfectly nested. Hopefully the final version clarifies this. For now handle (multiple) {'s and empty statements. */ do { if (c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_consume_token (parser); break; } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_consume_token (parser); bracecount++; } else if (bracecount && c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "not enough perfectly nested loops"); if (bracecount) { open_brace_parsed = true; bracecount--; } fail = true; count = 0; break; } } while (1); nbraces += bracecount; } if (nbraces) if_p = NULL; save_break = c_break_label; c_break_label = size_one_node; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = push_stmt_list (); if (open_brace_parsed) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); add_stmt (c_end_compound_stmt (here, stmt, true)); } else add_stmt (c_parser_c99_block_statement (parser, if_p)); if (c_cont_label) { tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label); SET_EXPR_LOCATION (t, loc); add_stmt (t); } body = pop_stmt_list (body); c_break_label = save_break; c_cont_label = save_cont; while (nbraces) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); nbraces--; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "collapsed loops not perfectly nested"); while (nbraces) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); add_stmt (body); c_parser_compound_statement_nostart (parser); body = c_end_compound_stmt (here, stmt, true); nbraces--; } goto pop_scopes; } } /* Only bother calling c_finish_omp_for if we haven't already generated an error from the initialization parsing. */ if (!fail) { stmt = c_finish_omp_for (loc, code, declv, NULL, initv, condv, incrv, body, pre_body); /* Check for iterators appearing in lb, b or incr expressions. */ if (stmt && !c_omp_check_loop_iv (stmt, declv, NULL)) stmt = NULL_TREE; if (stmt) { add_stmt (stmt); if (cclauses != NULL && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL) { tree *c; for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; ) if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE) c = &OMP_CLAUSE_CHAIN (*c); else { for (i = 0; i < count; i++) if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c)) break; if (i == count) c = &OMP_CLAUSE_CHAIN (*c); else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE) { error_at (loc, "iteration variable %qD should not be firstprivate", OMP_CLAUSE_DECL (*c)); *c = OMP_CLAUSE_CHAIN (*c); } else { /* Move lastprivate (decl) clause to OMP_FOR_CLAUSES. 
*/ tree l = *c; *c = OMP_CLAUSE_CHAIN (*c); if (code == OMP_SIMD) { OMP_CLAUSE_CHAIN (l) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l; } else { OMP_CLAUSE_CHAIN (l) = clauses; clauses = l; } } } } OMP_FOR_CLAUSES (stmt) = clauses; } ret = stmt; } pop_scopes: while (!for_block->is_empty ()) { /* FIXME diagnostics: LOC below should be the actual location of this particular for block. We need to build a list of locations to go along with FOR_BLOCK. */ stmt = c_end_compound_stmt (loc, for_block->pop (), true); add_stmt (stmt); } release_tree_vector (for_block); return ret; } /* Helper function for OpenMP parsing, split clauses and call finish_omp_clauses on each of the set of clauses afterwards. */ static void omp_split_clauses (location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree *cclauses) { int i; c_omp_split_clauses (loc, code, mask, clauses, cclauses); for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) if (cclauses[i]) cclauses[i] = c_finish_omp_clauses (cclauses[i], C_ORT_OMP); } /* OpenMP 4.0: #pragma omp simd simd-clause[optseq] new-line for-loop LOC is the location of the #pragma token. */ #define OMP_SIMD_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)) static tree c_parser_omp_simd (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree block, clauses, ret; strcat (p_name, " simd"); mask |= OMP_SIMD_CLAUSE_MASK; clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_SIMD, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; tree c = omp_find_clause (cclauses[C_OMP_CLAUSE_SPLIT_FOR], OMP_CLAUSE_ORDERED); if (c && OMP_CLAUSE_ORDERED_EXPR (c)) { error_at (OMP_CLAUSE_LOCATION (c), "%<ordered%> clause with parameter may not be specified " "on %qs construct", p_name); OMP_CLAUSE_ORDERED_EXPR (c) = NULL_TREE; } } block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, OMP_SIMD, clauses, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: #pragma omp for for-clause[optseq] new-line for-loop OpenMP 4.0: #pragma omp for simd for-simd-clause[optseq] new-line for-loop LOC is the location of the #pragma token. 
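   An illustrative use (array and bound names are placeholders):

     #pragma omp for schedule(static) nowait
     for (i = 0; i < n; i++)
       a[i] = b[i] + c[i];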
*/ #define OMP_FOR_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_for (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree block, clauses, ret; strcat (p_name, " for"); mask |= OMP_FOR_CLAUSE_MASK; /* parallel for{, simd} disallows nowait clause, but for target {teams distribute ,}parallel for{, simd} it should be accepted. */ if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT); /* Composite distribute parallel for{, simd} disallows ordered clause. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED); if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "simd") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ return c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); block = c_begin_compound_stmt (true); ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); if (ret == NULL_TREE) return ret; ret = make_node (OMP_FOR); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = block; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } /* Composite distribute parallel for disallows linear clause. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR); clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; } block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, OMP_FOR, clauses, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp master new-line structured-block LOC is the location of the #pragma token. 
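   An illustrative use (the body is a placeholder):

     #pragma omp master
       printf ("only the master thread runs this\n");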
*/ static tree c_parser_omp_master (location_t loc, c_parser *parser, bool *if_p) { c_parser_skip_to_pragma_eol (parser); return c_finish_omp_master (loc, c_parser_omp_structured_block (parser, if_p)); } /* OpenMP 2.5: # pragma omp ordered new-line structured-block OpenMP 4.5: # pragma omp ordered ordered-clauses new-line structured-block # pragma omp ordered depend-clauses new-line */ #define OMP_ORDERED_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREADS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMD)) #define OMP_ORDERED_DEPEND_CLAUSE_MASK \ (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) static bool c_parser_omp_ordered (c_parser *parser, enum pragma_context context, bool *if_p) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (context != pragma_stmt && context != pragma_compound) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_pragma_eol (parser, false); return false; } if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (!strcmp ("depend", p)) { if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return false; } if (context == pragma_stmt) { error_at (loc, "%<#pragma omp ordered%> with %<depend%> clause may " "only be used in compound statements"); c_parser_skip_to_pragma_eol (parser, false); return false; } tree clauses = c_parser_omp_all_clauses (parser, OMP_ORDERED_DEPEND_CLAUSE_MASK, "#pragma omp ordered"); c_finish_omp_ordered (loc, clauses, NULL_TREE); return false; } } tree clauses = c_parser_omp_all_clauses (parser, OMP_ORDERED_CLAUSE_MASK, "#pragma omp ordered"); if (!flag_openmp /* flag_openmp_simd */ && omp_find_clause (clauses, OMP_CLAUSE_SIMD) == NULL_TREE) return false; c_finish_omp_ordered (loc, clauses, c_parser_omp_structured_block (parser, if_p)); return true; } /* OpenMP 2.5: section-scope: { section-sequence } section-sequence: section-directive[opt] structured-block section-sequence section-directive structured-block SECTIONS_LOC is the location of the #pragma omp sections. */ static tree c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser) { tree stmt, substmt; bool error_suppress = false; location_t loc; loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Avoid skipping until the end of the block. 
*/ parser->error = false; return NULL_TREE; } stmt = push_stmt_list (); if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION) { substmt = c_parser_omp_structured_block (parser, NULL); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } while (1) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is (parser, CPP_EOF)) break; loc = c_parser_peek_token (parser)->location; if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION) { c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); error_suppress = false; } else if (!error_suppress) { error_at (loc, "expected %<#pragma omp section%> or %<}%>"); error_suppress = true; } substmt = c_parser_omp_structured_block (parser, NULL); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<#pragma omp section%> or %<}%>"); substmt = pop_stmt_list (stmt); stmt = make_node (OMP_SECTIONS); SET_EXPR_LOCATION (stmt, sections_loc); TREE_TYPE (stmt) = void_type_node; OMP_SECTIONS_BODY (stmt) = substmt; return add_stmt (stmt); } /* OpenMP 2.5: # pragma omp sections sections-clause[optseq] newline sections-scope LOC is the location of the #pragma token. */ #define OMP_SECTIONS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_sections (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses) { tree block, clauses, ret; strcat (p_name, " sections"); mask |= OMP_SECTIONS_CLAUSE_MASK; if (cclauses) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT); clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_SECTIONS, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]; } block = c_begin_compound_stmt (true); ret = c_parser_omp_sections_scope (loc, parser); if (ret) OMP_SECTIONS_CLAUSES (ret) = clauses; block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp parallel parallel-clause[optseq] new-line structured-block # pragma omp parallel for parallel-for-clause[optseq] new-line structured-block # pragma omp parallel sections parallel-sections-clause[optseq] new-line structured-block OpenMP 4.0: # pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line structured-block LOC is the location of the #pragma token. 
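   An illustrative combined use (all names are placeholders):

     #pragma omp parallel for num_threads(4) private(tmp)
     for (i = 0; i < n; i++)
       { tmp = 2 * b[i]; a[i] = tmp; }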
*/ #define OMP_PARALLEL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND)) static tree c_parser_omp_parallel (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree stmt, clauses, block; strcat (p_name, " parallel"); mask |= OMP_PARALLEL_CLAUSE_MASK; /* #pragma omp target parallel{, for, for simd} disallow copyin clause. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0 && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN); if (c_parser_next_token_is_keyword (parser, RID_FOR)) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ return c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p); block = c_begin_omp_parallel (); tree ret = c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p); stmt = c_finish_omp_parallel (loc, cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], block); if (ret == NULL_TREE) return ret; OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } /* When combined with distribute, parallel has to be followed by for. #pragma omp target parallel is allowed though. */ else if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { error_at (loc, "expected %<for%> after %qs", p_name); c_parser_skip_to_pragma_eol (parser); return NULL_TREE; } else if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } else if (cclauses == NULL && c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "sections") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); block = c_begin_omp_parallel (); c_parser_omp_sections (loc, parser, p_name, mask, cclauses); stmt = c_finish_omp_parallel (loc, cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], block); OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; } block = c_begin_omp_parallel (); c_parser_statement (parser, if_p); stmt = c_finish_omp_parallel (loc, clauses, block); return stmt; } /* OpenMP 2.5: # pragma omp single single-clause[optseq] new-line structured-block LOC is the location of the #pragma. 
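   An illustrative use (read_config is a placeholder function):

     #pragma omp single copyprivate(x)
       x = read_config ();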
*/ #define OMP_SINGLE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_single (location_t loc, c_parser *parser, bool *if_p) { tree stmt = make_node (OMP_SINGLE); SET_EXPR_LOCATION (stmt, loc); TREE_TYPE (stmt) = void_type_node; OMP_SINGLE_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK, "#pragma omp single"); OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser, if_p); return add_stmt (stmt); } /* OpenMP 3.0: # pragma omp task task-clause[optseq] new-line LOC is the location of the #pragma. */ #define OMP_TASK_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY)) static tree c_parser_omp_task (location_t loc, c_parser *parser, bool *if_p) { tree clauses, block; clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK, "#pragma omp task"); block = c_begin_omp_task (); c_parser_statement (parser, if_p); return c_finish_omp_task (loc, clauses, block); } /* OpenMP 3.0: # pragma omp taskwait new-line */ static void c_parser_omp_taskwait (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_taskwait (loc); } /* OpenMP 3.1: # pragma omp taskyield new-line */ static void c_parser_omp_taskyield (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_taskyield (loc); } /* OpenMP 4.0: # pragma omp taskgroup new-line */ static tree c_parser_omp_taskgroup (c_parser *parser, bool *if_p) { location_t loc = c_parser_peek_token (parser)->location; c_parser_skip_to_pragma_eol (parser); return c_finish_omp_taskgroup (loc, c_parser_omp_structured_block (parser, if_p)); } /* OpenMP 4.0: # pragma omp cancel cancel-clause[optseq] new-line LOC is the location of the #pragma. */ #define OMP_CANCEL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static void c_parser_omp_cancel (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); tree clauses = c_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK, "#pragma omp cancel"); c_finish_omp_cancel (loc, clauses); } /* OpenMP 4.0: # pragma omp cancellation point cancelpt-clause[optseq] new-line LOC is the location of the #pragma. 
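   An illustrative use:

     #pragma omp cancellation point for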
*/ #define OMP_CANCELLATION_POINT_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP)) static void c_parser_omp_cancellation_point (c_parser *parser, enum pragma_context context) { location_t loc = c_parser_peek_token (parser)->location; tree clauses; bool point_seen = false; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "point") == 0) { c_parser_consume_token (parser); point_seen = true; } } if (!point_seen) { c_parser_error (parser, "expected %<point%>"); c_parser_skip_to_pragma_eol (parser); return; } if (context != pragma_compound) { if (context == pragma_stmt) error_at (loc, "%<#pragma %s%> may only be used in compound statements", "omp cancellation point"); else c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_pragma_eol (parser, false); return; } clauses = c_parser_omp_all_clauses (parser, OMP_CANCELLATION_POINT_CLAUSE_MASK, "#pragma omp cancellation point"); c_finish_omp_cancellation_point (loc, clauses); } /* OpenMP 4.0: #pragma omp distribute distribute-clause[optseq] new-line for-loop */ #define OMP_DISTRIBUTE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)) static tree c_parser_omp_distribute (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree clauses, block, ret; strcat (p_name, " distribute"); mask |= OMP_DISTRIBUTE_CLAUSE_MASK; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); bool simd = false; bool parallel = false; if (strcmp (p, "simd") == 0) simd = true; else parallel = strcmp (p, "parallel") == 0; if (parallel || simd) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ { if (simd) return c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); else return c_parser_omp_parallel (loc, parser, p_name, mask, cclauses, if_p); } block = c_begin_compound_stmt (true); if (simd) ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); else ret = c_parser_omp_parallel (loc, parser, p_name, mask, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); if (ret == NULL) return ret; ret = make_node (OMP_DISTRIBUTE); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = block; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; } block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, OMP_DISTRIBUTE, clauses, NULL, if_p); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 4.0: # pragma omp teams 
teams-clause[optseq] new-line structured-block */ #define OMP_TEAMS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)) static tree c_parser_omp_teams (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree clauses, block, ret; strcat (p_name, " teams"); mask |= OMP_TEAMS_CLAUSE_MASK; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "distribute") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ return c_parser_omp_distribute (loc, parser, p_name, mask, cclauses, if_p); block = c_begin_compound_stmt (true); ret = c_parser_omp_distribute (loc, parser, p_name, mask, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); if (ret == NULL) return ret; clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; ret = make_node (OMP_TEAMS); TREE_TYPE (ret) = void_type_node; OMP_TEAMS_CLAUSES (ret) = clauses; OMP_TEAMS_BODY (ret) = block; OMP_TEAMS_COMBINED (ret) = 1; return add_stmt (ret); } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; } tree stmt = make_node (OMP_TEAMS); TREE_TYPE (stmt) = void_type_node; OMP_TEAMS_CLAUSES (stmt) = clauses; OMP_TEAMS_BODY (stmt) = c_parser_omp_structured_block (parser, if_p); return add_stmt (stmt); } /* OpenMP 4.0: # pragma omp target data target-data-clause[optseq] new-line structured-block */ #define OMP_TARGET_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR)) static tree c_parser_omp_target_data (location_t loc, c_parser *parser, bool *if_p) { tree clauses = c_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK, "#pragma omp target data"); int map_seen = 0; for (tree *pc = &clauses; *pc;) { if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (*pc)) { case GOMP_MAP_TO: case GOMP_MAP_ALWAYS_TO: case GOMP_MAP_FROM: case GOMP_MAP_ALWAYS_FROM: case GOMP_MAP_TOFROM: case GOMP_MAP_ALWAYS_TOFROM: case GOMP_MAP_ALLOC: map_seen = 3; break; case GOMP_MAP_FIRSTPRIVATE_POINTER: case GOMP_MAP_ALWAYS_POINTER: break; default: map_seen |= 1; error_at (OMP_CLAUSE_LOCATION (*pc), "%<#pragma omp target data%> with map-type other " "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> " "on %<map%> clause"); *pc = OMP_CLAUSE_CHAIN (*pc); continue; } pc = &OMP_CLAUSE_CHAIN (*pc); } if (map_seen != 3) { if (map_seen == 0) error_at (loc, "%<#pragma omp target data%> must contain at least " "one %<map%> clause"); return NULL_TREE; } tree stmt = make_node (OMP_TARGET_DATA); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_DATA_CLAUSES (stmt) = clauses; keep_next_level (); tree block = c_begin_compound_stmt (true); add_stmt 
(c_parser_omp_structured_block (parser, if_p)); OMP_TARGET_DATA_BODY (stmt) = c_end_compound_stmt (loc, block, true); SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* OpenMP 4.0: # pragma omp target update target-update-clause[optseq] new-line */ #define OMP_TARGET_UPDATE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static bool c_parser_omp_target_update (location_t loc, c_parser *parser, enum pragma_context context) { if (context == pragma_stmt) { error_at (loc, "%<#pragma %s%> may only be used in compound statements", "omp target update"); c_parser_skip_to_pragma_eol (parser, false); return false; } tree clauses = c_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK, "#pragma omp target update"); if (omp_find_clause (clauses, OMP_CLAUSE_TO) == NULL_TREE && omp_find_clause (clauses, OMP_CLAUSE_FROM) == NULL_TREE) { error_at (loc, "%<#pragma omp target update%> must contain at least one " "%<from%> or %<to%> clause"); return false; } tree stmt = make_node (OMP_TARGET_UPDATE); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_UPDATE_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); return false; } /* OpenMP 4.5: # pragma omp target enter data target-data-clause[optseq] new-line */ #define OMP_TARGET_ENTER_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_target_enter_data (location_t loc, c_parser *parser, enum pragma_context context) { bool data_seen = false; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "data") == 0) { c_parser_consume_token (parser); data_seen = true; } } if (!data_seen) { c_parser_error (parser, "expected %<data%>"); c_parser_skip_to_pragma_eol (parser); return NULL_TREE; } if (context == pragma_stmt) { error_at (loc, "%<#pragma %s%> may only be used in compound statements", "omp target enter data"); c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } tree clauses = c_parser_omp_all_clauses (parser, OMP_TARGET_ENTER_DATA_CLAUSE_MASK, "#pragma omp target enter data"); int map_seen = 0; for (tree *pc = &clauses; *pc;) { if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (*pc)) { case GOMP_MAP_TO: case GOMP_MAP_ALWAYS_TO: case GOMP_MAP_ALLOC: map_seen = 3; break; case GOMP_MAP_FIRSTPRIVATE_POINTER: case GOMP_MAP_ALWAYS_POINTER: break; default: map_seen |= 1; error_at (OMP_CLAUSE_LOCATION (*pc), "%<#pragma omp target enter data%> with map-type other " "than %<to%> or %<alloc%> on %<map%> clause"); *pc = OMP_CLAUSE_CHAIN (*pc); continue; } pc = &OMP_CLAUSE_CHAIN (*pc); } if (map_seen != 3) { if (map_seen == 0) error_at (loc, "%<#pragma omp target enter data%> must contain at least " "one %<map%> clause"); return NULL_TREE; } tree stmt = make_node (OMP_TARGET_ENTER_DATA); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_ENTER_DATA_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); return stmt; } /* OpenMP 4.5: # pragma omp target exit data target-data-clause[optseq] new-line */
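/* An illustrative use (the array name and bound are placeholders):

     #pragma omp target exit data map(from: results[0:n])  */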
#define OMP_TARGET_EXIT_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_target_exit_data (location_t loc, c_parser *parser, enum pragma_context context) { bool data_seen = false; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "data") == 0) { c_parser_consume_token (parser); data_seen = true; } } if (!data_seen) { c_parser_error (parser, "expected %<data%>"); c_parser_skip_to_pragma_eol (parser); return NULL_TREE; } if (context == pragma_stmt) { error_at (loc, "%<#pragma %s%> may only be used in compound statements", "omp target exit data"); c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } tree clauses = c_parser_omp_all_clauses (parser, OMP_TARGET_EXIT_DATA_CLAUSE_MASK, "#pragma omp target exit data"); int map_seen = 0; for (tree *pc = &clauses; *pc;) { if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (*pc)) { case GOMP_MAP_FROM: case GOMP_MAP_ALWAYS_FROM: case GOMP_MAP_RELEASE: case GOMP_MAP_DELETE: map_seen = 3; break; case GOMP_MAP_FIRSTPRIVATE_POINTER: case GOMP_MAP_ALWAYS_POINTER: break; default: map_seen |= 1; error_at (OMP_CLAUSE_LOCATION (*pc), "%<#pragma omp target exit data%> with map-type other " "than %<from%>, %<release%> or %<delete%> on %<map%>" " clause"); *pc = OMP_CLAUSE_CHAIN (*pc); continue; } pc = &OMP_CLAUSE_CHAIN (*pc); } if (map_seen != 3) { if (map_seen == 0) error_at (loc, "%<#pragma omp target exit data%> must contain at least one " "%<map%> clause"); return NULL_TREE; } tree stmt = make_node (OMP_TARGET_EXIT_DATA); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_EXIT_DATA_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); return stmt; } /* OpenMP 4.0: # pragma omp target target-clause[optseq] new-line structured-block */ #define OMP_TARGET_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR)) static bool c_parser_omp_target (c_parser *parser, enum pragma_context context, bool *if_p) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); tree *pc = NULL, stmt, block; if (context != pragma_stmt && context != pragma_compound) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_pragma_eol (parser); return false; } if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); enum tree_code ccode = ERROR_MARK; if (strcmp (p, "teams") == 0) ccode = OMP_TEAMS; else if (strcmp (p, "parallel") == 0) ccode = OMP_PARALLEL; else if (strcmp (p, "simd") == 0) ccode = OMP_SIMD; if (ccode != ERROR_MARK) { tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT]; char p_name[sizeof ("#pragma omp target teams distribute " "parallel for simd")]; c_parser_consume_token (parser); strcpy (p_name, "#pragma omp target"); if (!flag_openmp) /* 
flag_openmp_simd */ { tree stmt; switch (ccode) { case OMP_TEAMS: stmt = c_parser_omp_teams (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; case OMP_PARALLEL: stmt = c_parser_omp_parallel (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; case OMP_SIMD: stmt = c_parser_omp_simd (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; default: gcc_unreachable (); } return stmt != NULL_TREE; } keep_next_level (); tree block = c_begin_compound_stmt (true), ret; switch (ccode) { case OMP_TEAMS: ret = c_parser_omp_teams (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; case OMP_PARALLEL: ret = c_parser_omp_parallel (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; case OMP_SIMD: ret = c_parser_omp_simd (loc, parser, p_name, OMP_TARGET_CLAUSE_MASK, cclauses, if_p); break; default: gcc_unreachable (); } block = c_end_compound_stmt (loc, block, true); if (ret == NULL_TREE) return false; if (ccode == OMP_TEAMS) { /* For combined target teams, ensure the num_teams and thread_limit clause expressions are evaluated on the host, before entering the target construct. */ tree c; for (c = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; c; c = OMP_CLAUSE_CHAIN (c)) if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT) && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST) { tree expr = OMP_CLAUSE_OPERAND (c, 0); tree tmp = create_tmp_var_raw (TREE_TYPE (expr)); expr = build4 (TARGET_EXPR, TREE_TYPE (expr), tmp, expr, NULL_TREE, NULL_TREE); add_stmt (expr); OMP_CLAUSE_OPERAND (c, 0) = expr; tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (tc) = tmp; OMP_CLAUSE_CHAIN (tc) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = tc; } } tree stmt = make_node (OMP_TARGET); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; OMP_TARGET_BODY (stmt) = block; OMP_TARGET_COMBINED (stmt) = 1; add_stmt (stmt); pc = &OMP_TARGET_CLAUSES (stmt); goto check_clauses; } else if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return false; } else if (strcmp (p, "data") == 0) { c_parser_consume_token (parser); c_parser_omp_target_data (loc, parser, if_p); return true; } else if (strcmp (p, "enter") == 0) { c_parser_consume_token (parser); c_parser_omp_target_enter_data (loc, parser, context); return false; } else if (strcmp (p, "exit") == 0) { c_parser_consume_token (parser); c_parser_omp_target_exit_data (loc, parser, context); return false; } else if (strcmp (p, "update") == 0) { c_parser_consume_token (parser); return c_parser_omp_target_update (loc, parser, context); } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return false; } stmt = make_node (OMP_TARGET); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK, "#pragma omp target"); pc = &OMP_TARGET_CLAUSES (stmt); keep_next_level (); block = c_begin_compound_stmt (true); add_stmt (c_parser_omp_structured_block (parser, if_p)); OMP_TARGET_BODY (stmt) = c_end_compound_stmt (loc, block, true); SET_EXPR_LOCATION (stmt, loc); add_stmt (stmt); check_clauses: while (*pc) { if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP) switch (OMP_CLAUSE_MAP_KIND (*pc)) { case GOMP_MAP_TO: case GOMP_MAP_ALWAYS_TO: case GOMP_MAP_FROM: case GOMP_MAP_ALWAYS_FROM: case GOMP_MAP_TOFROM: case 
GOMP_MAP_ALWAYS_TOFROM: case GOMP_MAP_ALLOC: case GOMP_MAP_FIRSTPRIVATE_POINTER: case GOMP_MAP_ALWAYS_POINTER: break; default: error_at (OMP_CLAUSE_LOCATION (*pc), "%<#pragma omp target%> with map-type other " "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> " "on %<map%> clause"); *pc = OMP_CLAUSE_CHAIN (*pc); continue; } pc = &OMP_CLAUSE_CHAIN (*pc); } return true; } /* OpenMP 4.0: # pragma omp declare simd declare-simd-clauses[optseq] new-line */ #define OMP_DECLARE_SIMD_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH)) static void c_parser_omp_declare_simd (c_parser *parser, enum pragma_context context) { auto_vec<c_token> clauses; while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) { c_parser_skip_to_pragma_eol (parser); return; } clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); while (c_parser_next_token_is (parser, CPP_PRAGMA)) { if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_DECLARE || c_parser_peek_2nd_token (parser)->type != CPP_NAME || strcmp (IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value), "simd") != 0) { c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition or another " "%<#pragma omp declare simd%>"); return; } c_parser_consume_pragma (parser); while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) { c_parser_skip_to_pragma_eol (parser); return; } clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); } /* Make sure nothing tries to read past the end of the tokens. 
*/ c_token eof_token; memset (&eof_token, 0, sizeof (eof_token)); eof_token.type = CPP_EOF; clauses.safe_push (eof_token); clauses.safe_push (eof_token); switch (context) { case pragma_external: if (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION) { int ext = disable_extension_diagnostics (); do c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION); c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL, clauses); restore_extension_diagnostics (ext); } else c_parser_declaration_or_fndef (parser, true, true, true, false, true, NULL, clauses); break; case pragma_struct: case pragma_param: case pragma_stmt: c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition"); break; case pragma_compound: if (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION) { int ext = disable_extension_diagnostics (); do c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_KEYWORD) && c_parser_peek_token (parser)->keyword == RID_EXTENSION); if (c_parser_next_tokens_start_declaration (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, clauses); restore_extension_diagnostics (ext); break; } restore_extension_diagnostics (ext); } else if (c_parser_next_tokens_start_declaration (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true, true, NULL, clauses); break; } c_parser_error (parser, "%<#pragma omp declare simd%> must be followed by " "function declaration or definition"); break; default: gcc_unreachable (); } } /* Finalize #pragma omp declare simd clauses after FNDECL has been parsed, and put that into "omp declare simd" attribute. */ static void c_finish_omp_declare_simd (c_parser *parser, tree fndecl, tree parms, vec<c_token> clauses) { /* Normally first token is CPP_NAME "simd". CPP_EOF there indicates error has been reported and CPP_PRAGMA that c_finish_omp_declare_simd has already processed the tokens. */ if (clauses.exists () && clauses[0].type == CPP_EOF) return; if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL) { error ("%<#pragma omp declare simd%> not immediately followed by " "a function declaration or definition"); clauses[0].type = CPP_EOF; return; } if (clauses.exists () && clauses[0].type != CPP_NAME) { error_at (DECL_SOURCE_LOCATION (fndecl), "%<#pragma omp declare simd%> not immediately followed by " "a single function declaration or definition"); clauses[0].type = CPP_EOF; return; } if (parms == NULL_TREE) parms = DECL_ARGUMENTS (fndecl); unsigned int tokens_avail = parser->tokens_avail; gcc_assert (parser->tokens == &parser->tokens_buf[0]); parser->tokens = clauses.address (); parser->tokens_avail = clauses.length (); /* c_parser_omp_declare_simd pushed 2 extra CPP_EOF tokens at the end. 
*/ while (parser->tokens_avail > 3) { c_token *token = c_parser_peek_token (parser); gcc_assert (token->type == CPP_NAME && strcmp (IDENTIFIER_POINTER (token->value), "simd") == 0); c_parser_consume_token (parser); parser->in_pragma = true; tree c = NULL_TREE; c = c_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK, "#pragma omp declare simd"); c = c_omp_declare_simd_clauses_to_numbers (parms, c); if (c != NULL_TREE) c = tree_cons (NULL_TREE, c, NULL_TREE); c = build_tree_list (get_identifier ("omp declare simd"), c); TREE_CHAIN (c) = DECL_ATTRIBUTES (fndecl); DECL_ATTRIBUTES (fndecl) = c; } parser->tokens = &parser->tokens_buf[0]; parser->tokens_avail = tokens_avail; if (clauses.exists ()) clauses[0].type = CPP_PRAGMA; } /* OpenMP 4.0: # pragma omp declare target new-line declarations and definitions # pragma omp end declare target new-line OpenMP 4.5: # pragma omp declare target ( extended-list ) new-line # pragma omp declare target declare-target-clauses[seq] new-line */ #define OMP_DECLARE_TARGET_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK)) static void c_parser_omp_declare_target (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree clauses = NULL_TREE; if (c_parser_next_token_is (parser, CPP_NAME)) clauses = c_parser_omp_all_clauses (parser, OMP_DECLARE_TARGET_CLAUSE_MASK, "#pragma omp declare target"); else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE, clauses); clauses = c_finish_omp_clauses (clauses, C_ORT_OMP); c_parser_skip_to_pragma_eol (parser); } else { c_parser_skip_to_pragma_eol (parser); current_omp_declare_target_attribute++; return; } if (current_omp_declare_target_attribute) error_at (loc, "%<#pragma omp declare target%> with clauses in between " "%<#pragma omp declare target%> without clauses and " "%<#pragma omp end declare target%>"); for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { tree t = OMP_CLAUSE_DECL (c), id; tree at1 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (t)); tree at2 = lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (t)); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINK) { id = get_identifier ("omp declare target link"); std::swap (at1, at2); } else id = get_identifier ("omp declare target"); if (at2) { error_at (OMP_CLAUSE_LOCATION (c), "%qD specified both in declare target %<link%> and %<to%>" " clauses", t); continue; } if (!at1) { DECL_ATTRIBUTES (t) = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t)); if (TREE_CODE (t) != FUNCTION_DECL && !is_global_var (t)) continue; symtab_node *node = symtab_node::get (t); if (node != NULL) { node->offloadable = 1; if (ENABLE_OFFLOADING) { g->have_offload = true; if (is_a <varpool_node *> (node)) vec_safe_push (offload_vars, t); } } } } } static void c_parser_omp_end_declare_target (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "declare") == 0) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "target") == 0) c_parser_consume_token (parser); else { c_parser_error (parser, "expected %<target%>"); c_parser_skip_to_pragma_eol (parser); return; } } else { c_parser_error (parser, "expected %<declare%>"); c_parser_skip_to_pragma_eol 
(parser); return; } c_parser_skip_to_pragma_eol (parser); if (!current_omp_declare_target_attribute) error_at (loc, "%<#pragma omp end declare target%> without corresponding " "%<#pragma omp declare target%>"); else current_omp_declare_target_attribute--; } /* OpenMP 4.0 #pragma omp declare reduction (reduction-id : typename-list : expression) \ initializer-clause[opt] new-line initializer-clause: initializer (omp_priv = initializer) initializer (function-name (argument-list)) */ static void c_parser_omp_declare_reduction (c_parser *parser, enum pragma_context context) { unsigned int tokens_avail = 0, i; vec<tree> types = vNULL; vec<c_token> clauses = vNULL; enum tree_code reduc_code = ERROR_MARK; tree reduc_id = NULL_TREE; tree type; location_t rloc = c_parser_peek_token (parser)->location; if (context == pragma_struct || context == pragma_param) { error ("%<#pragma omp declare reduction%> not at file or block scope"); goto fail; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto fail; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: reduc_code = PLUS_EXPR; break; case CPP_MULT: reduc_code = MULT_EXPR; break; case CPP_MINUS: reduc_code = MINUS_EXPR; break; case CPP_AND: reduc_code = BIT_AND_EXPR; break; case CPP_XOR: reduc_code = BIT_XOR_EXPR; break; case CPP_OR: reduc_code = BIT_IOR_EXPR; break; case CPP_AND_AND: reduc_code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: reduc_code = TRUTH_ORIF_EXPR; break; case CPP_NAME: const char *p; p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "min") == 0) { reduc_code = MIN_EXPR; break; } if (strcmp (p, "max") == 0) { reduc_code = MAX_EXPR; break; } reduc_id = c_parser_peek_token (parser)->value; break; default: c_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, " "%<^%>, %<|%>, %<&&%>, %<||%> or identifier"); goto fail; } tree orig_reduc_id, reduc_decl; orig_reduc_id = reduc_id; reduc_id = c_omp_reduction_id (reduc_code, reduc_id); reduc_decl = c_omp_reduction_decl (reduc_id); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) goto fail; while (true) { location_t loc = c_parser_peek_token (parser)->location; struct c_type_name *ctype = c_parser_type_name (parser); if (ctype != NULL) { type = groktypename (ctype, NULL, NULL); if (type == error_mark_node) ; else if ((INTEGRAL_TYPE_P (type) || TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == COMPLEX_TYPE) && orig_reduc_id == NULL_TREE) error_at (loc, "predeclared arithmetic type in " "%<#pragma omp declare reduction%>"); else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "function or array type in " "%<#pragma omp declare reduction%>"); else if (TYPE_ATOMIC (type)) error_at (loc, "%<_Atomic%> qualified type in " "%<#pragma omp declare reduction%>"); else if (TYPE_QUALS_NO_ADDR_SPACE (type)) error_at (loc, "const, volatile or restrict qualified type in " "%<#pragma omp declare reduction%>"); else { tree t; for (t = DECL_INITIAL (reduc_decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) { error_at (loc, "redeclaration of %qs " "%<#pragma omp declare reduction%> for " "type %qT", IDENTIFIER_POINTER (reduc_id) + sizeof ("omp declare reduction ") - 1, type); location_t ploc = DECL_SOURCE_LOCATION (TREE_VEC_ELT (TREE_VALUE (t), 0)); error_at (ploc, "previous %<#pragma omp declare " "reduction%>"); break; } if (t == NULL_TREE) types.safe_push (type); } if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); 
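/* Illustrative only: a directive whose type list this loop accepts
   could be

       #pragma omp declare reduction (merge : struct vec : \
               omp_out = vec_add (omp_out, omp_in)) \
           initializer (omp_priv = vec_zero ())

   where "struct vec", vec_add and vec_zero are hypothetical user
   names; each comma-separated typename lands in TYPES above.  */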
else break; } else break; } if (!c_parser_require (parser, CPP_COLON, "expected %<:%>") || types.is_empty ()) { fail: clauses.release (); types.release (); while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL) break; c_parser_consume_token (parser); } c_parser_skip_to_pragma_eol (parser); return; } if (types.length () > 1) { while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) goto fail; clauses.safe_push (*token); c_parser_consume_token (parser); } clauses.safe_push (*c_parser_peek_token (parser)); c_parser_skip_to_pragma_eol (parser); /* Make sure nothing tries to read past the end of the tokens. */ c_token eof_token; memset (&eof_token, 0, sizeof (eof_token)); eof_token.type = CPP_EOF; clauses.safe_push (eof_token); clauses.safe_push (eof_token); } int errs = errorcount; FOR_EACH_VEC_ELT (types, i, type) { tokens_avail = parser->tokens_avail; gcc_assert (parser->tokens == &parser->tokens_buf[0]); if (!clauses.is_empty ()) { parser->tokens = clauses.address (); parser->tokens_avail = clauses.length (); parser->in_pragma = true; } bool nested = current_function_decl != NULL_TREE; if (nested) c_push_function_context (); tree fndecl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, reduc_id, default_function_type); current_function_decl = fndecl; allocate_struct_function (fndecl, true); push_scope (); tree stmt = push_stmt_list (); /* Intentionally BUILTINS_LOCATION, so that -Wshadow doesn't warn about these. */ tree omp_out = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_out"), type); DECL_ARTIFICIAL (omp_out) = 1; DECL_CONTEXT (omp_out) = fndecl; pushdecl (omp_out); tree omp_in = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_in"), type); DECL_ARTIFICIAL (omp_in) = 1; DECL_CONTEXT (omp_in) = fndecl; pushdecl (omp_in); struct c_expr combiner = c_parser_expression (parser); struct c_expr initializer; tree omp_priv = NULL_TREE, omp_orig = NULL_TREE; bool bad = false; initializer.set_error (); if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) bad = true; else if (c_parser_next_token_is (parser, CPP_NAME) && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "initializer") == 0) { c_parser_consume_token (parser); pop_scope (); push_scope (); omp_priv = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_priv"), type); DECL_ARTIFICIAL (omp_priv) = 1; DECL_INITIAL (omp_priv) = error_mark_node; DECL_CONTEXT (omp_priv) = fndecl; pushdecl (omp_priv); omp_orig = build_decl (BUILTINS_LOCATION, VAR_DECL, get_identifier ("omp_orig"), type); DECL_ARTIFICIAL (omp_orig) = 1; DECL_CONTEXT (omp_orig) = fndecl; pushdecl (omp_orig); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) bad = true; else if (!c_parser_next_token_is (parser, CPP_NAME)) { c_parser_error (parser, "expected %<omp_priv%> or " "function-name"); bad = true; } else if (strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value), "omp_priv") != 0) { if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN || c_parser_peek_token (parser)->id_kind != C_ID_ID) { c_parser_error (parser, "expected function-name %<(%>"); bad = true; } else initializer = c_parser_postfix_expression (parser); if (initializer.value && TREE_CODE (initializer.value) == CALL_EXPR) { int j; tree c = initializer.value; for (j = 0; j < call_expr_nargs (c); j++) { tree a = CALL_EXPR_ARG (c, j); STRIP_NOPS (a); if (TREE_CODE (a) 
== ADDR_EXPR && TREE_OPERAND (a, 0) == omp_priv) break; } if (j == call_expr_nargs (c)) error ("one of the initializer call arguments should be " "%<&omp_priv%>"); } } else { c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_EQ, "expected %<=%>")) bad = true; else { tree st = push_stmt_list (); location_t loc = c_parser_peek_token (parser)->location; rich_location richloc (line_table, loc); start_init (omp_priv, NULL_TREE, 0, &richloc); struct c_expr init = c_parser_initializer (parser); finish_init (); finish_decl (omp_priv, loc, init.value, init.original_type, NULL_TREE); pop_stmt_list (st); } } if (!bad && !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) bad = true; } if (!bad) { c_parser_skip_to_pragma_eol (parser); tree t = tree_cons (type, make_tree_vec (omp_priv ? 6 : 3), DECL_INITIAL (reduc_decl)); DECL_INITIAL (reduc_decl) = t; DECL_SOURCE_LOCATION (omp_out) = rloc; TREE_VEC_ELT (TREE_VALUE (t), 0) = omp_out; TREE_VEC_ELT (TREE_VALUE (t), 1) = omp_in; TREE_VEC_ELT (TREE_VALUE (t), 2) = combiner.value; walk_tree (&combiner.value, c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 0), NULL); if (omp_priv) { DECL_SOURCE_LOCATION (omp_priv) = rloc; TREE_VEC_ELT (TREE_VALUE (t), 3) = omp_priv; TREE_VEC_ELT (TREE_VALUE (t), 4) = omp_orig; TREE_VEC_ELT (TREE_VALUE (t), 5) = initializer.value; walk_tree (&initializer.value, c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL); walk_tree (&DECL_INITIAL (omp_priv), c_check_omp_declare_reduction_r, &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL); } } pop_stmt_list (stmt); pop_scope (); if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } set_cfun (NULL); current_function_decl = NULL_TREE; if (nested) c_pop_function_context (); if (!clauses.is_empty ()) { parser->tokens = &parser->tokens_buf[0]; parser->tokens_avail = tokens_avail; } if (bad) goto fail; if (errs != errorcount) break; } clauses.release (); types.release (); } /* OpenMP 4.0 #pragma omp declare simd declare-simd-clauses[optseq] new-line #pragma omp declare reduction (reduction-id : typename-list : expression) \ initializer-clause[opt] new-line #pragma omp declare target new-line */ static void c_parser_omp_declare (c_parser *parser, enum pragma_context context) { c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "simd") == 0) { /* c_parser_consume_token (parser); done in c_parser_omp_declare_simd. 
*/ c_parser_omp_declare_simd (parser, context); return; } if (strcmp (p, "reduction") == 0) { c_parser_consume_token (parser); c_parser_omp_declare_reduction (parser, context); return; } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return; } if (strcmp (p, "target") == 0) { c_parser_consume_token (parser); c_parser_omp_declare_target (parser); return; } } c_parser_error (parser, "expected %<simd%> or %<reduction%> " "or %<target%>"); c_parser_skip_to_pragma_eol (parser); } /* OpenMP 4.5: #pragma omp taskloop taskloop-clause[optseq] new-line for-loop #pragma omp taskloop simd taskloop-simd-clause[optseq] new-line for-loop */ #define OMP_TASKLOOP_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_GRAINSIZE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TASKS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY)) static tree c_parser_omp_taskloop (location_t loc, c_parser *parser, char *p_name, omp_clause_mask mask, tree *cclauses, bool *if_p) { tree clauses, block, ret; strcat (p_name, " taskloop"); mask |= OMP_TASKLOOP_CLAUSE_MASK; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "simd") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION); c_parser_consume_token (parser); if (!flag_openmp) /* flag_openmp_simd */ return c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); block = c_begin_compound_stmt (true); ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p); block = c_end_compound_stmt (loc, block, true); if (ret == NULL) return ret; ret = make_node (OMP_TASKLOOP); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = block; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* flag_openmp_simd */ { c_parser_skip_to_pragma_eol (parser, false); return NULL_TREE; } clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL); if (cclauses) { omp_split_clauses (loc, OMP_TASKLOOP, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP]; } block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, OMP_TASKLOOP, clauses, NULL, if_p); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* Main entry point to parsing most OpenMP pragmas. 
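   As an illustration of one construct dispatched here, the taskloop
   parser above accepts input such as

       #pragma omp taskloop simd grainsize(64)
       for (i = 0; i < n; i++)
         a[i] = 2 * a[i];

   with the combined construct's clauses split between the taskloop
   and simd halves before the tree nodes are built.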
*/ static void c_parser_omp_construct (c_parser *parser, bool *if_p) { enum pragma_kind p_kind; location_t loc; tree stmt; char p_name[sizeof "#pragma omp teams distribute parallel for simd"]; omp_clause_mask mask (0); loc = c_parser_peek_token (parser)->location; p_kind = c_parser_peek_token (parser)->pragma_kind; c_parser_consume_pragma (parser); switch (p_kind) { case PRAGMA_OACC_ATOMIC: c_parser_omp_atomic (loc, parser); return; case PRAGMA_OACC_CACHE: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_cache (loc, parser); break; case PRAGMA_OACC_DATA: stmt = c_parser_oacc_data (loc, parser, if_p); break; case PRAGMA_OACC_HOST_DATA: stmt = c_parser_oacc_host_data (loc, parser, if_p); break; case PRAGMA_OACC_KERNELS: case PRAGMA_OACC_PARALLEL: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_kernels_parallel (loc, parser, p_kind, p_name, if_p); break; case PRAGMA_OACC_LOOP: strcpy (p_name, "#pragma acc"); stmt = c_parser_oacc_loop (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OACC_WAIT: strcpy (p_name, "#pragma wait"); stmt = c_parser_oacc_wait (loc, parser, p_name); break; case PRAGMA_OMP_ATOMIC: c_parser_omp_atomic (loc, parser); return; case PRAGMA_OMP_CRITICAL: stmt = c_parser_omp_critical (loc, parser, if_p); break; case PRAGMA_OMP_DISTRIBUTE: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_distribute (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OMP_FOR: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_for (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OMP_MASTER: stmt = c_parser_omp_master (loc, parser, if_p); break; case PRAGMA_OMP_PARALLEL: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_parallel (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OMP_SECTIONS: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_sections (loc, parser, p_name, mask, NULL); break; case PRAGMA_OMP_SIMD: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_simd (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OMP_SINGLE: stmt = c_parser_omp_single (loc, parser, if_p); break; case PRAGMA_OMP_TASK: stmt = c_parser_omp_task (loc, parser, if_p); break; case PRAGMA_OMP_TASKGROUP: stmt = c_parser_omp_taskgroup (parser, if_p); break; case PRAGMA_OMP_TASKLOOP: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_taskloop (loc, parser, p_name, mask, NULL, if_p); break; case PRAGMA_OMP_TEAMS: strcpy (p_name, "#pragma omp"); stmt = c_parser_omp_teams (loc, parser, p_name, mask, NULL, if_p); break; default: gcc_unreachable (); } if (stmt) gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION); } /* OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void c_parser_omp_threadprivate (c_parser *parser) { tree vars, t; location_t loc; c_parser_consume_pragma (parser); loc = c_parser_peek_token (parser)->location; vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens() should construct a list of locations to go along with the var list. */ /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. 
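   A typical use, for illustration only:

       int counter;
       #pragma omp threadprivate (counter)

   after which every thread that references "counter" sees its own
   thread-local copy.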
*/ if (!VAR_P (v)) error_at (loc, "%qD is not a variable", v); else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v)) error_at (loc, "%qE declared %<threadprivate%> after first use", v); else if (! is_global_var (v)) error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (! COMPLETE_TYPE_P (TREE_TYPE (v))) error_at (loc, "%<threadprivate%> %qE has incomplete type", v); else { if (! DECL_THREAD_LOCAL_P (v)) { set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } C_DECL_THREADPRIVATE_P (v) = 1; } } c_parser_skip_to_pragma_eol (parser); } /* Parse a transaction attribute (GCC Extension). transaction-attribute: attributes [ [ any-word ] ] The transactional memory language description is written for C++, and uses the C++0x attribute syntax. For compatibility, allow the bracket style for transactions in C as well. */ static tree c_parser_transaction_attributes (c_parser *parser) { tree attr_name, attr = NULL; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) return c_parser_attributes (parser); if (!c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) return NULL_TREE; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>")) goto error1; attr_name = c_parser_attribute_any_word (parser); if (attr_name) { c_parser_consume_token (parser); attr = build_tree_list (attr_name, NULL_TREE); } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); error1: c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return attr; } /* Parse a __transaction_atomic or __transaction_relaxed statement (GCC Extension). transaction-statement: __transaction_atomic transaction-attribute[opt] compound-statement __transaction_relaxed compound-statement Note that the only valid attribute is: "outer". */ static tree c_parser_transaction (c_parser *parser, enum rid keyword) { unsigned int old_in = parser->in_transaction; unsigned int this_in = 1, new_in; location_t loc = c_parser_peek_token (parser)->location; tree stmt, attrs; gcc_assert ((keyword == RID_TRANSACTION_ATOMIC || keyword == RID_TRANSACTION_RELAXED) && c_parser_next_token_is_keyword (parser, keyword)); c_parser_consume_token (parser); if (keyword == RID_TRANSACTION_RELAXED) this_in |= TM_STMT_ATTR_RELAXED; else { attrs = c_parser_transaction_attributes (parser); if (attrs) this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER); } /* Keep track if we're in the lexical scope of an outer transaction. */ new_in = this_in | (old_in & TM_STMT_ATTR_OUTER); parser->in_transaction = new_in; stmt = c_parser_compound_statement (parser); parser->in_transaction = old_in; if (flag_tm) stmt = c_finish_transaction (loc, stmt, this_in); else error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ? "%<__transaction_atomic%> without transactional memory support enabled" : "%<__transaction_relaxed %> " "without transactional memory support enabled")); return stmt; } /* Parse a __transaction_atomic or __transaction_relaxed expression (GCC Extension). 
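   Illustrative use of the statement form parsed above, where "total"
   and "x" are hypothetical variables:

       __transaction_atomic { total += x; }

   The expression form handled below follows this grammar: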
transaction-expression: __transaction_atomic ( expression ) __transaction_relaxed ( expression ) */ static struct c_expr c_parser_transaction_expression (c_parser *parser, enum rid keyword) { struct c_expr ret; unsigned int old_in = parser->in_transaction; unsigned int this_in = 1; location_t loc = c_parser_peek_token (parser)->location; tree attrs; gcc_assert ((keyword == RID_TRANSACTION_ATOMIC || keyword == RID_TRANSACTION_RELAXED) && c_parser_next_token_is_keyword (parser, keyword)); c_parser_consume_token (parser); if (keyword == RID_TRANSACTION_RELAXED) this_in |= TM_STMT_ATTR_RELAXED; else { attrs = c_parser_transaction_attributes (parser); if (attrs) this_in |= parse_tm_stmt_attr (attrs, 0); } parser->in_transaction = this_in; matching_parens parens; if (parens.require_open (parser)) { tree expr = c_parser_expression (parser).value; ret.original_type = TREE_TYPE (expr); ret.value = build1 (TRANSACTION_EXPR, ret.original_type, expr); if (this_in & TM_STMT_ATTR_RELAXED) TRANSACTION_EXPR_RELAXED (ret.value) = 1; SET_EXPR_LOCATION (ret.value, loc); ret.original_code = TRANSACTION_EXPR; if (!parens.require_close (parser)) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } } else { error: ret.set_error (); ret.original_code = ERROR_MARK; ret.original_type = NULL; } parser->in_transaction = old_in; if (!flag_tm) error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ? "%<__transaction_atomic%> without transactional memory support enabled" : "%<__transaction_relaxed %> " "without transactional memory support enabled")); set_c_expr_source_range (&ret, loc, loc); return ret; } /* Parse a __transaction_cancel statement (GCC Extension). transaction-cancel-statement: __transaction_cancel transaction-attribute[opt] ; Note that the only valid attribute is "outer". */ static tree c_parser_transaction_cancel (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree attrs; bool is_outer = false; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRANSACTION_CANCEL)); c_parser_consume_token (parser); attrs = c_parser_transaction_attributes (parser); if (attrs) is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0); if (!flag_tm) { error_at (loc, "%<__transaction_cancel%> without " "transactional memory support enabled"); goto ret_error; } else if (parser->in_transaction & TM_STMT_ATTR_RELAXED) { error_at (loc, "%<__transaction_cancel%> within a " "%<__transaction_relaxed%>"); goto ret_error; } else if (is_outer) { if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0 && !is_tm_may_cancel_outer (current_function_decl)) { error_at (loc, "outer %<__transaction_cancel%> not " "within outer %<__transaction_atomic%>"); error_at (loc, " or a %<transaction_may_cancel_outer%> function"); goto ret_error; } } else if (parser->in_transaction == 0) { error_at (loc, "%<__transaction_cancel%> not within " "%<__transaction_atomic%>"); goto ret_error; } return add_stmt (build_tm_abort_call (loc, is_outer)); ret_error: return build1 (NOP_EXPR, void_type_node, error_mark_node); } /* Parse a single source file. */ void c_parse_file (void) { /* Use local storage to begin. If the first token is a pragma, parse it. If it is #pragma GCC pch_preprocess, then this will load a PCH file which will cause garbage collection. 
*/ c_parser tparser; memset (&tparser, 0, sizeof tparser); tparser.tokens = &tparser.tokens_buf[0]; the_parser = &tparser; if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS) c_parser_pragma_pch_preprocess (&tparser); the_parser = ggc_alloc<c_parser> (); *the_parser = tparser; if (tparser.tokens == &tparser.tokens_buf[0]) the_parser->tokens = &the_parser->tokens_buf[0]; /* Initialize EH, if we've been told to do so. */ if (flag_exceptions) using_eh_for_cleanups (); c_parser_translation_unit (the_parser); the_parser = NULL; } /* Parse the body of a function declaration marked with "__RTL". The RTL parser works on the level of characters read from a FILE *, whereas c_parser works at the level of tokens. Square this circle by consuming all of the tokens up to and including the closing brace, recording the start/end of the RTL fragment, and reopening the file and re-reading the relevant lines within the RTL parser. This requires the opening and closing braces of the C function to be on separate lines from the RTL they wrap. Take ownership of START_WITH_PASS, if non-NULL. */ void c_parser_parse_rtl_body (c_parser *parser, char *start_with_pass) { if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { free (start_with_pass); return; } location_t start_loc = c_parser_peek_token (parser)->location; /* Consume all tokens, up to the closing brace, handling matching pairs of braces in the rtl dump. */ int num_open_braces = 1; while (1) { switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_BRACE: num_open_braces++; break; case CPP_CLOSE_BRACE: if (--num_open_braces == 0) goto found_closing_brace; break; case CPP_EOF: error_at (start_loc, "no closing brace"); free (start_with_pass); return; default: break; } c_parser_consume_token (parser); } found_closing_brace: /* At the closing brace; record its location. */ location_t end_loc = c_parser_peek_token (parser)->location; /* Consume the closing brace. */ c_parser_consume_token (parser); /* Invoke the RTL parser. */ if (!read_rtl_function_body_from_file_range (start_loc, end_loc)) { free (start_with_pass); return; } /* If a pass name was provided for START_WITH_PASS, run the backend accordingly now, on the cfun created above, transferring ownership of START_WITH_PASS. */ if (start_with_pass) run_rtl_passes (start_with_pass); } #include "gt-c-c-parser.h"
test34.c
/* test34.c: both branches enter the same unnamed critical section, so the
   Started/Ended pairs never interleave.  Build with -fopenmp. */
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

int main(void)
{
#pragma omp parallel
  {
    if (!omp_get_thread_num())
      {
#pragma omp critical
        {
          printf("Started 1\n");
          sleep(2);
          printf("Ended 1\n");
        }
      }
    else
      {
#pragma omp critical
        {
          printf("Started 2\n");
          sleep(2);
          printf("Ended 2\n");
        }
      }
  }
  return 0;
}
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
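%
%  Usage sketch (illustrative only; "image" and "exception" are assumed
%  to already exist):
%
%    Image *upright = AutoOrientImage(image, image->orientation, exception);
%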
% */ MagickExport Image *AutoOrientImage(const Image *image, const OrientationType orientation,ExceptionInfo *exception) { Image *orient_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); orient_image=(Image *) NULL; switch(orientation) { case UndefinedOrientation: case TopLeftOrientation: default: { orient_image=CloneImage(image,0,0,MagickTrue,exception); break; } case TopRightOrientation: { orient_image=FlopImage(image,exception); break; } case BottomRightOrientation: { orient_image=RotateImage(image,180.0,exception); break; } case BottomLeftOrientation: { orient_image=FlipImage(image,exception); break; } case LeftTopOrientation: { orient_image=TransposeImage(image,exception); break; } case RightTopOrientation: { orient_image=RotateImage(image,270.0,exception); break; } case RightBottomOrientation: { orient_image=TransverseImage(image,exception); break; } case LeftBottomOrientation: { orient_image=RotateImage(image,90.0,exception); break; } } if (orient_image != (Image *) NULL) orient_image->orientation=TopLeftOrientation; return(orient_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChopImage() removes a region of an image and collapses the image to occupy % the removed portion. % % The format of the ChopImage method is: % % Image *ChopImage(const Image *image,const RectangleInfo *chop_info) % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o chop_info: Define the region of the image to chop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(chop_info != (RectangleInfo *) NULL); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
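   (Usage sketch, illustrative only: with a filled-in RectangleInfo
   named "chop", Image *chopped = ChopImage(image, &chop, exception);
   removes that region and closes the gap, which is what the two copy
   loops below implement row by row.)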
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,chop_image,extent.y,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o 
l i d a t e C M Y K I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,0,0,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)); switch (i) { case 0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
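%
%  Usage sketch (illustrative only; "image" and "exception" are assumed
%  to exist):
%
%    RectangleInfo region = { .width = 100, .height = 100, .x = 10, .y = 20 };
%    Image *cropped = CropImage(image, &region, exception);
%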
% */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","(\"%.20gx%.20g%+.20g%+.20g\") `%s'", (double) geometry->width,(double) geometry->height, (double) geometry->x,(double) geometry->y,image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. 
*/ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,crop_image,crop_image->rows,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CropImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const RectangleInfo *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t PixelRoundOffset(double x) { /* Round the fraction to nearest integer. 
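   For example, 2.4 rounds to 2 and 2.6 rounds to 3; at an exact .5
   fraction the two gaps are equal, the test below is false, and the
   value rounds up (2.5 becomes 3).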
*/ if ((x-floor(x)) < (ceil(x)-x)) return(CastDoubleToLong(floor(x))); return(CastDoubleToLong(ceil(x))); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ crop_image=NewImageList(); width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ? -1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=PixelRoundOffset((double) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) PixelRoundOffset((double) (offset.y+(geometry.y < -1 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=PixelRoundOffset((double) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) PixelRoundOffset((double) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y. */ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. 
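   (Usage sketch, illustrative only: CropImageToTiles(image, "64x64",
   exception) takes this branch when the image is larger than 64x64
   and returns a list of fixed-size tiles covering the page.)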
*/ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=(Image *) NULL; crop_image=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns a excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExcerptImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define ExcerptImageTag "Excerpt/Image" CacheView *excerpt_view, *image_view; Image *excerpt_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate excerpt image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (excerpt_image == (Image *) NULL) return((Image *) NULL); /* Excerpt each row. 
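   (Usage sketch, illustrative only:

       RectangleInfo region = { .width = 50, .height = 40, .x = 5, .y = 5 };
       Image *part = ExcerptImage(image, &region, exception);

   takes the geometry literally, without the virtual-canvas arithmetic
   that CropImage performs.)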
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) excerpt_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel); if ((traits == UndefinedPixelTrait) || (excerpt_traits == UndefinedPixelTrait)) continue; SetPixelChannel(excerpt_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(excerpt_image); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
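   (Usage sketch, illustrative only: a 640x480 image can be padded to
   800x600 with RectangleInfo geometry = { .width = 800, .height = 600,
   .x = -80, .y = -60 }; and ExtentImage(image, &geometry, exception);
   the source is composited at (-x, -y), here (80, 60), centering it
   on the background color.)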
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image,exception); if (status == MagickFalse) { extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image,image->compose,MagickTrue, -geometry->x,-geometry->y,exception); if (status != MagickFalse) Update8BIMClipPath(extent_image,image->columns,image->rows,geometry); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. 
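   (Usage sketch, illustrative only: Image *mirrored = FlipImage(image,
   exception); the loop below writes source row y to destination row
   rows - y - 1.)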
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) flip_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel); if ((traits == UndefinedPixelTrait) || (flip_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flip_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(flip_image); } if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlipImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(flop_image)*flop_image->columns; for (x=0; x < (ssize_t) flop_image->columns; x++) { ssize_t i; q-=GetPixelChannels(flop_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel); if ((traits == UndefinedPixelTrait) || (flop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flop_image,channel,p[i],q); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy, const ssize_t dx,const ssize_t dy,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; /* Transfer scanline. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(source); i++) { PixelChannel channel = GetPixelChannelChannel(source,i); PixelTrait source_traits=GetPixelChannelTraits(source,channel); PixelTrait destination_traits=GetPixelChannelTraits(destination, channel); if ((source_traits == UndefinedPixelTrait) || (destination_traits == UndefinedPixelTrait)) continue; SetPixelChannel(destination,channel,p[i],q); } p+=GetPixelChannels(source); q+=GetPixelChannels(destination); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); roll_image=CloneImage(image,0,0,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. */ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. 
A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. 
%
%  This function destroys what it assumes to be a single image list.
%  If the input image is part of a larger list, all other images in that list
%  will be simply 'lost', not destroyed.
%
%  Also, if the crop generates a list of images, only the first image is
%  resized.  Finally, if the crop succeeds but the resize fails, you will get
%  a cropped image as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls to
%  CropImageToTiles() or ResizeImage(), as appropriate.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
%        const char *image_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,
  ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user-specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user-specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1), 0,1,transverse_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(transverse_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; q-=GetPixelChannels(transverse_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image, channel); if ((traits == UndefinedPixelTrait) || (transverse_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transverse_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
% % The format of the TrimImage method is: % % Image *TrimImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception) { const char *artifact; Image *trim_image; RectangleInfo geometry, page; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); geometry=GetImageBoundingBox(image,exception); if ((geometry.width == 0) || (geometry.height == 0)) { Image *crop_image; crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=image->page; crop_image->page.x=(-1); crop_image->page.y=(-1); return(crop_image); } page=geometry; artifact=GetImageArtifact(image,"trim:minSize"); if (artifact != (const char *) NULL) (void) ParseAbsoluteGeometry(artifact,&page); if ((geometry.width < page.width) && (geometry.height < page.height)) { /* Limit trim to a minimum size. */ switch (image->gravity) { case CenterGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } case NorthWestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); geometry.y-=((ssize_t) page.height-geometry.height); break; } case NorthGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; geometry.y-=((ssize_t) page.height-geometry.height); break; } case NorthEastGravity: { geometry.y-=((ssize_t) page.height-geometry.height); break; } case EastGravity: { geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } case SouthEastGravity: break; case SouthGravity: { geometry.x-=((ssize_t) page.width-geometry.width)/2; break; } case SouthWestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); break; } case WestGravity: { geometry.x-=((ssize_t) page.width-geometry.width); geometry.y-=((ssize_t) page.height-geometry.height)/2; break; } default: break; } geometry.width=page.width; geometry.height=page.height; } geometry.x+=image->page.x; geometry.y+=image->page.y; trim_image=CropImage(image,&geometry,exception); if (trim_image != (Image *) NULL) Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry); return(trim_image); }
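/*
  Usage sketch for the transform functions above (an illustration appended by
  the editor, not part of the original transform.c): it assumes the MagickCore
  headers and library are available, and "input.png"/"output.png" are
  placeholder filenames.  Each transform returns a newly allocated Image that
  the caller must destroy.
*/
#include "MagickCore/MagickCore.h"

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *mirror_image,
    *trim_image;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);  /* read the source image */
  if (image == (Image *) NULL)
    return(1);
  mirror_image=FlipImage(image,exception);  /* vertical mirror */
  image=DestroyImage(image);
  if (mirror_image == (Image *) NULL)
    return(1);
  trim_image=TrimImage(mirror_image,exception);  /* crop background border */
  mirror_image=DestroyImage(mirror_image);
  if (trim_image == (Image *) NULL)
    return(1);
  (void) CopyMagickString(trim_image->filename,"output.png",MagickPathExtent);
  (void) WriteImage(image_info,trim_image,exception);
  trim_image=DestroyImage(trim_image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}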
69c979fee_ac_so4.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw); int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = block_sizes[0]; int yb_size = block_sizes[1]; int x0_blk0_size = block_sizes[2]; int y0_blk0_size = block_sizes[3]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3)) //{ int sf = 2; int t_blk_size = 2 * sf * (time_M - time_m); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; 
t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time + 2) % (3), t1 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; } for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3)) { struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000; } return 0; } void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict 
save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; if (x0_blk0_size == 0 || y0_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, u, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r8 = 1.0/dt; float r7 = 1.0/(dt*dt); float r6 = 1.0/(vp[x - time + 4][y - time + 4][z + 4]*vp[x - time + 4][y - time + 4][z + 4]); u[t2][x - time + 4][y - time + 4][z + 4] = (r6*(-r7*(u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F*u[t1][x - time + 4][y - time + 4][z + 4])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F*(u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F*(u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F*u[t1][x - time + 4][y - time + 4][z + 4])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, u, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; u[t2][x - time + 4][y - time + 4][zind + 4] += r0; } } } } } } }
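/*
  Companion sketch (not part of the generated kernel): the modular index
  arithmetic above keeps only three time levels of u alive.  For each
  wavefront step tw, t1 names the current level, t0 the previous one, and
  t2 the level being overwritten, so the time buffer rotates in place.
*/
#include <stdio.h>

int main(void)
{
  for (int tw = 0; tw < 6; tw += 1)
  {
    int t0 = (tw + 2) % 3; /* u at step tw-1 (read)    */
    int t1 = tw % 3;       /* u at step tw   (read)    */
    int t2 = (tw + 1) % 3; /* u at step tw+1 (written) */
    printf("tw=%d: read slots %d,%d -> write slot %d\n", tw, t0, t1, t2);
  }
  return 0;
}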
cancel.c
// RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt

#include "callback.h"

int main()
{
#pragma omp parallel num_threads(1)
  {
#pragma omp cancel parallel
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=[[TASK_ID:[0-9]+]], parallel_function={{0x[0-f]*}}, task_type=ompt_task_initial=1, has_dependences=no
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID]], flags=17, codeptr_ra={{0x[0-f]*}}

  return 0;
}
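// Companion sketch (not part of the test above): #pragma omp cancel is a
// no-op unless cancellation was activated, e.g. via OMP_CANCELLATION=true,
// which omp_get_cancellation() reports at run time.
#include <stdio.h>
#include <omp.h>

int main()
{
  printf("cancellation %s\n", omp_get_cancellation() ? "enabled" : "disabled");
#pragma omp parallel num_threads(2)
  {
#pragma omp cancel parallel
    // With cancellation enabled, a thread that executes the cancel construct
    // continues at the end of the region; other threads do so at their next
    // cancellation point, so the printf below is skipped.
#pragma omp cancellation point parallel
    printf("thread %d was not cancelled\n", omp_get_thread_num());
  }
  return 0;
}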
rose_private.c
/*
 * private (including lastprivate) scalars can be recognized by liveness analysis:
 * they are dead (i.e., not in the live-in variable set) with respect to the loop body.
 * If they are also live-out with respect to the loop, they are lastprivate.
 */
#include "omp.h"

int g;

void foo()
{
  int i;
  int x;
  int a[100];
  int b[100];
  // x should be recognized as a private variable during parallelization,
  // yet it introduces a set of dependencies which can be eliminated
#pragma omp parallel for private (x,i) firstprivate (g)
  for (i = 0; i <= 99; i += 1) {
    int y = i + 1;
    // g = y;
    x = a[i] + g;
    //b[i]=x+1+y;
  }
}

/*
 * dep SgExprStatement:x =(a[i]); SgExprStatement:x =(a[i]); 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 0 Scalar dep type OUTPUT_DEP;SgVarRefExp:x@13:6->SgVarRefExp:x@13:6 == 0;||::
 * dep SgExprStatement:x =(a[i]); SgExprStatement:b[i] =(x + 1); 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1 Scalar dep type TRUE_DEP;SgVarRefExp:x@13:6->SgVarRefExp:x@14:10 == 0;||::
 * dep SgExprStatement:b[i] =(x + 1); SgExprStatement:x =(a[i]); 1*1 SCALAR_BACK_DEP; commonlevel = 1 CarryLevel = 0 Scalar dep type ANTI_DEP;SgVarRefExp:x@14:10->SgVarRefExp:x@13:6 <= -1;||::
 */
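/*
 * Illustrative sketch of the live-out case described above (a hypothetical
 * example, not part of the ROSE test input): x is read after the loop, so it
 * is live-out and the parallelizer must use lastprivate(x) instead of
 * private(x) to preserve the sequential result.
 */
#include <stdio.h>

int main()
{
  int a[100];
  int x = 0;
  for (int i = 0; i <= 99; i += 1)
    a[i] = i;
#pragma omp parallel for lastprivate(x)
  for (int i = 0; i <= 99; i += 1)
    x = a[i] + 1; /* dead on entry to each iteration, live-out after the loop */
  printf("%d\n", x); /* sequential semantics preserved: x == a[99] + 1 == 100 */
  return 0;
}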
mpncpdq.c
/* $Header$ */ /* mpncpdq -- netCDF pack, re-dimension, query */ /* Purpose: Pack, re-dimension, query single netCDF file and output to a single file */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* Usage: ncpdq -O -D 3 -a lat,lev,lon -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -a lon,lev,lat -v three_dmn_var ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -a lon,time -x -v three_double_dmn ~/nco/data/in.nc ~/foo.nc;ncks -P ~/foo.nc ncpdq -O -D 3 -P all_new ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P all_xst ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P xst_new ~/nco/data/in.nc ~/foo.nc ncpdq -O -D 3 -P upk ~/nco/data/in.nc ~/foo.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <time.h> /* machine time */ #include <unistd.h> /* POSIX stuff */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI #include <mpi.h> /* MPI definitions */ #include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #include "libnco.h" /* netCDF Operator (NCO) library */ int main(int argc,char **argv) { aed_sct *aed_lst_add_fst=NULL_CEWI; aed_sct *aed_lst_scl_fct=NULL_CEWI; char **dmn_rdr_lst_in=NULL_CEWI; /* Option a */ char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in=NULL_CEWI; char **var_lst_in=NULL_CEWI; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */ char *nco_pck_map_sng=NULL_CEWI; /* [sng] Packing map Option M */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *rec_dmn_nm_in=NULL; /* [sng] Record dimension name, original */ char *rec_dmn_nm_out=NULL; /* [sng] Record dimension name, re-ordered */ char *rec_dmn_nm_out_crr=NULL; /* [sng] Name of record dimension, if any, required by re-order */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char add_fst_sng[]="add_offset"; /* [sng] Unidata standard string for add offset */ char scl_fct_sng[]="scale_factor"; /* [sng] Unidata standard string for scale factor */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567Aa:CcD:d:FhL:l:M:Oo:P:p:RrSt:v:Ux-:"; cnk_dmn_sct **cnk_dmn=NULL_CEWI; dmn_sct **dim=NULL_CEWI; dmn_sct **dmn_out; dmn_sct **dmn_rdr=NULL; /* [sct] Dimension structures to be re-ordered */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int **dmn_idx_out_in=NULL; /* [idx] Dimension correspondence, output->input CEWI */ int *in_id_arr; int abb_arg_nbr=0; int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_out_idx; /* [idx] Index over output dimension list */ int dmn_out_idx_rec_in=NCO_REC_DMN_UNDEFINED; /* [idx] Record dimension index in output dimension list, original */ int dmn_rdr_nbr=0; /* [nbr] Number of dimension to re-order */ int dmn_rdr_nbr_in=0; /* [nbr] Original number of dimension to re-order */ int dmn_rdr_nbr_utl=0; /* [nbr] Number of dimension to re-order, utilized */ int fl_idx=int_CEWI; int fl_nbr=0; int fl_in_fmt; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int 
fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int idx=int_CEWI; int idx_rdr=int_CEWI; int in_id; int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_out; int nbr_dmn_xtr; int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ int nco_pck_map=nco_pck_map_flt_sht; /* [enm] Packing map */ int nco_pck_plc=nco_pck_plc_nil; /* [enm] Packing policy */ int opt; int out_id; int rcd=NC_NOERR; /* [rcd] Return code */ int rec_dmn_id_in=NCO_REC_DMN_UNDEFINED; /* [id] Record dimension ID in input file */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; lmt_sct **aux=NULL_CEWI; /* Auxiliary coordinate limits */ lmt_sct **lmt=NULL_CEWI; lmt_all_sct **lmt_all_lst=NULL_CEWI; /* List of *lmt_all structures */ nco_bool **dmn_rvr_in=NULL; /* [flg] Reverse dimension */ nco_bool *dmn_rvr_rdr=NULL; /* [flg] Reverse dimension */ cnv_sct *cnv; /* [sct] Convention structure */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool FL_RTR_RMT_LCN; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool REDEFINED_RECORD_DIMENSION=False; /* [flg] Re-defined record dimension */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ nm_id_sct *dmn_lst; nm_id_sct *dmn_rdr_lst; nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc; var_sct **var_prc_out; #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */ nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */ int fl_nm_lng; /* [nbr] Output file name length */ int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */ int jdx=0; /* [idx] For MPI indexing local variables */ /* csz: fxm why 60? make dynamic? 
*/ int lcl_idx_lst[60]; /* [arr] Array containing indices of variables processed at each Worker */ int lcl_nbr_var=0; /* [nbr] Count of variables processes at each Worker */ int msg_tag_typ; /* [enm] MPI message tag type */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ int tkn_wrt_rsp; /* [enm] Response to request for write token */ int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */ int rnk_wrk; /* [idx] Worker rank */ int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, 
{"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"arrange",required_argument,0,'a'}, {"permute",required_argument,0,'a'}, {"reorder",required_argument,0,'a'}, {"rdr",required_argument,0,'a'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"pck_map",required_argument,0,'M'}, {"map",required_argument,0,'M'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"pack_policy",required_argument,0,'P'}, {"pck_plc",required_argument,0,'P'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"suspend", no_argument,0,'S'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"unpack",no_argument,0,'U'}, {"upk",no_argument,0,'U'}, {"variable",required_argument,0,'v'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"help",no_argument,0,'?'}, {"hlp",no_argument,0,'?'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef ENABLE_MPI /* MPI Initialization */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr); MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk); #endif /* !ENABLE_MPI */ /* Start clock and save command line */ cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) 
nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'a': /* Re-order dimensions */ dmn_rdr_lst_in=nco_lst_prs_2D(optarg,",",&dmn_rdr_nbr_in); dmn_rdr_nbr=dmn_rdr_nbr_in; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. 
*/ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'M': /* Packing map */ nco_pck_map_sng=(char *)strdup(optarg); nco_pck_map=nco_pck_map_get(nco_pck_map_sng); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'P': /* Packing policy */ nco_pck_plc_sng=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; #ifdef ENABLE_MPI case 'S': /* Suspend with signal handler to facilitate debugging */ if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm); while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */ break; #endif /* !ENABLE_MPI */ case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'U': /* Unpacking switch */ nco_pck_plc_sng=(char *)strdup("upk"); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'X': /* Copy auxiliary coordinate argument for later processing */ aux_arg[aux_nbr]=(char *)strdup(optarg); aux_nbr++; MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case '?': /* Print proper usage */ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Make uniform list of user-specified chunksizes */ if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg); /* Make uniform list of user-specified dimension limits */ lmt=nco_lmt_prs(lmt_nbr,lmt_arg); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Parse auxiliary coordinates */ if(aux_nbr > 0){ int aux_idx_nbr; aux=nco_aux_evl(in_id,aux_nbr,aux_arg,&aux_idx_nbr); if(aux_idx_nbr > 0){ lmt=(lmt_sct **)nco_realloc(lmt,(lmt_nbr+aux_idx_nbr)*sizeof(lmt_sct *)); int lmt_nbr_new=lmt_nbr+aux_idx_nbr; int aux_idx=0; for(int lmt_idx=lmt_nbr;lmt_idx<lmt_nbr_new;lmt_idx++) lmt[lmt_idx]=aux[aux_idx++]; lmt_nbr=lmt_nbr_new; } /* endif aux */ } /* endif aux_nbr */ /* Get number of variables, dimensions, and record dimension ID of input file */ (void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id_in); (void)nco_inq_format(in_id,&fl_in_fmt); /* Form initial extraction list which may include extended regular expressions */ xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr); /* Change included variables to excluded variables */ if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Add all coordinate variables to extraction list */ if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv); /* Extract coordinates associated with extracted variables */ if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv); /* Sort extraction list by variable ID for fastest I/O */ if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False); /* Find coordinate/dimension values associated with user-specified limits NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */ for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV); /* Place all dimensions in lmt_all_lst */ lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *)); /* Initialize lmt_all_sct's */ (void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr); /* Find dimensions associated with variables to be extracted */ dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr); /* Fill-in dimension structure for all extracted dimensions */ dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm); /* Dimension list no longer needed */ dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr); /* Duplicate input dimension structures for output dimension 
structures */
nbr_dmn_out=nbr_dmn_xtr;
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_out*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_out;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Merge hyperslab limit information into dimension structures */
if(nbr_dmn_fl > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr,lmt_all_lst,nbr_dmn_fl);
/* No re-order dimensions specified implies packing request */
if(dmn_rdr_nbr == 0){
if(nco_pck_plc == nco_pck_plc_nil) nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: DEBUG Packing map is %s and packing policy is %s\n",nco_prg_nm_get(),nco_pck_map_sng_get(nco_pck_map),nco_pck_plc_sng_get(nco_pck_plc));
} /* endif */
/* From this point forward, assume ncpdq operator packs or re-orders, not both */
if(dmn_rdr_nbr > 0 && nco_pck_plc != nco_pck_plc_nil){
(void)fprintf(fp_stdout,"%s: ERROR %s does not support simultaneous dimension re-ordering (-a switch) and packing (-P switch).\nHINT: Invoke %s twice, once to re-order (with -a), and once to pack (with -P).\n",nco_prg_nm,nco_prg_nm,nco_prg_nm);
nco_exit(EXIT_FAILURE);
} /* end if */
if(dmn_rdr_nbr > 0){
/* NB: Same logic as in ncwa, perhaps combine into single function, nco_dmn_avg_rdr_prp()? */
/* Make list of user-specified dimension re-orders */
/* Create reversed dimension list */
dmn_rvr_rdr=(nco_bool *)nco_malloc(dmn_rdr_nbr*sizeof(nco_bool));
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr;idx_rdr++){
if(dmn_rdr_lst_in[idx_rdr][0] == '-'){
dmn_rvr_rdr[idx_rdr]=True;
/* Copy string to new memory one past negative sign to avoid losing byte */
optarg_lcl=dmn_rdr_lst_in[idx_rdr];
dmn_rdr_lst_in[idx_rdr]=(char *)strdup(optarg_lcl+1);
optarg_lcl=(char *)nco_free(optarg_lcl);
}else{
dmn_rvr_rdr[idx_rdr]=False;
} /* end else */
} /* end loop over idx_rdr */
/* Create structured list of re-ordering dimension names and IDs */
dmn_rdr_lst=nco_dmn_lst_mk(in_id,dmn_rdr_lst_in,dmn_rdr_nbr);
/* Form list of re-ordering dimensions from extracted input dimensions */
dmn_rdr=(dmn_sct **)nco_malloc(dmn_rdr_nbr*sizeof(dmn_sct *));
/* Loop over original number of re-order dimensions */
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr;idx_rdr++){
for(idx=0;idx<nbr_dmn_xtr;idx++){
if(!strcmp(dmn_rdr_lst[idx_rdr].nm,dim[idx]->nm)) break;
} /* end loop over idx */
if(idx != nbr_dmn_xtr) dmn_rdr[dmn_rdr_nbr_utl++]=dim[idx]; else if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING re-ordering dimension \"%s\" is not contained in any variable in extraction list\n",nco_prg_nm,dmn_rdr_lst[idx_rdr].nm);
} /* end loop over idx_rdr */
dmn_rdr_nbr=dmn_rdr_nbr_utl;
/* Collapse extra dimension structure space to prevent accidentally using it */
dmn_rdr=(dmn_sct **)nco_realloc(dmn_rdr,dmn_rdr_nbr*sizeof(dmn_sct *));
/* Dimension list in name-ID format is no longer needed */
dmn_rdr_lst=nco_nm_id_lst_free(dmn_rdr_lst,dmn_rdr_nbr);
/* Make sure no re-ordering dimension is specified more than once */
for(idx=0;idx<dmn_rdr_nbr;idx++){
for(idx_rdr=0;idx_rdr<dmn_rdr_nbr;idx_rdr++){
if(idx_rdr != idx){
if(dmn_rdr[idx]->id == dmn_rdr[idx_rdr]->id){
(void)fprintf(fp_stdout,"%s: ERROR %s specified more than once in re-ordering list\n",nco_prg_nm,dmn_rdr[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* end if */
} /* end if */
} /* end loop over idx_rdr */
} /* end loop over idx */
if(dmn_rdr_nbr > nbr_dmn_xtr){
(void)fprintf(fp_stdout,"%s: ERROR More re-ordering dimensions than extracted dimensions\n",nco_prg_nm);
nco_exit(EXIT_FAILURE);
} /* end if */
} /*
dmn_rdr_nbr <= 0 */
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_map,nco_pck_plc,dmn_rdr,dmn_rdr_nbr,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
/* We now have final list of variables to extract. Phew. */
if(nco_dbg_lvl >= nco_dbg_var){
for(idx=0;idx<xtr_nbr;idx++) (void)fprintf(stderr,"var[%d]->nm = %s, ->id=[%d]\n",idx,var[idx]->nm,var[idx]->id);
for(idx=0;idx<nbr_var_fix;idx++) (void)fprintf(stderr,"var_fix[%d]->nm = %s, ->id=[%d]\n",idx,var_fix[idx]->nm,var_fix[idx]->id);
for(idx=0;idx<nbr_var_prc;idx++) (void)fprintf(stderr,"var_prc[%d]->nm = %s, ->id=[%d]\n",idx,var_prc[idx]->nm,var_prc[idx]->id);
} /* end if */
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguineous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
if(nco_dbg_lvl >= nco_dbg_sbr) (void)fprintf(stderr,"Input, output file IDs = %d, %d\n",in_id,out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
} /* !prc_rnk == rnk_mgr */
#endif /* !ENABLE_MPI */
/* If re-ordering, then in files with record dimension... */
if(dmn_rdr_nbr > 0 && rec_dmn_id_in != NCO_REC_DMN_UNDEFINED){
/* ...which, if any, output dimension structure currently holds record dimension?
*/
for(dmn_out_idx=0;dmn_out_idx<nbr_dmn_out;dmn_out_idx++)
if(dmn_out[dmn_out_idx]->is_rec_dmn) break;
if(dmn_out_idx != nbr_dmn_out){
dmn_out_idx_rec_in=dmn_out_idx;
/* Initialize output record dimension to input record dimension */
rec_dmn_nm_in=rec_dmn_nm_out=dmn_out[dmn_out_idx_rec_in]->nm;
}else{
dmn_out_idx_rec_in=NCO_REC_DMN_UNDEFINED;
} /* end else */
} /* end if file contains record dimension */
/* If re-ordering, determine and set new dimensionality in metadata of each re-ordered variable */
if(dmn_rdr_nbr > 0){
dmn_idx_out_in=(int **)nco_malloc(nbr_var_prc*sizeof(int *));
dmn_rvr_in=(nco_bool **)nco_malloc(nbr_var_prc*sizeof(nco_bool *));
for(idx=0;idx<nbr_var_prc;idx++){
dmn_idx_out_in[idx]=(int *)nco_malloc(var_prc[idx]->nbr_dim*sizeof(int));
dmn_rvr_in[idx]=(nco_bool *)nco_malloc(var_prc[idx]->nbr_dim*sizeof(nco_bool));
/* nco_var_dmn_rdr_mtd() does re-order heavy lifting */
rec_dmn_nm_out_crr=nco_var_dmn_rdr_mtd(var_prc[idx],var_prc_out[idx],dmn_rdr,dmn_rdr_nbr,dmn_idx_out_in[idx],dmn_rvr_rdr,dmn_rvr_in[idx]);
/* If record dimension required by current variable re-order... ...and variable is multi-dimensional (one-dimensional arrays cannot request record dimension changes)... */
if(rec_dmn_nm_out_crr && var_prc_out[idx]->nbr_dim > 1){
/* ...differs from input and current output record dimension(s)... */
if(strcmp(rec_dmn_nm_out_crr,rec_dmn_nm_in) && strcmp(rec_dmn_nm_out_crr,rec_dmn_nm_out)){
/* ...and current output record dimension already differs from input record dimension... */
if(REDEFINED_RECORD_DIMENSION){
/* ...then requested re-order requires multiple record dimensions... */
if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stdout,"%s: WARNING Re-order requests multiple record dimensions.\nOnly first request will be honored (netCDF allows only one record dimension). Record dimensions involved [original,first change request (honored),latest change request (made by variable %s)]=[%s,%s,%s]\n",nco_prg_nm,var_prc[idx]->nm,rec_dmn_nm_in,rec_dmn_nm_out,rec_dmn_nm_out_crr);
break;
}else{ /* !REDEFINED_RECORD_DIMENSION */
/* ...otherwise, update output record dimension name... */
rec_dmn_nm_out=rec_dmn_nm_out_crr;
/* ...and set new and un-set old record dimensions... */
var_prc_out[idx]->dim[0]->is_rec_dmn=True;
dmn_out[dmn_out_idx_rec_in]->is_rec_dmn=False;
/* ...and set flag that record dimension has been re-defined... */
REDEFINED_RECORD_DIMENSION=True;
} /* !REDEFINED_RECORD_DIMENSION */
} /* endif new and old record dimensions differ */
} /* endif current variable is record variable */
} /* end loop over var_prc */
} /* endif dmn_rdr_nbr > 0 */
/* NB: Much of following logic is required by netCDF constraint that only one record dimension is allowed per file. netCDF4 will relax this constraint. Hence making following logic prettier or functionalizing is not high priority. Logic may need to be simplified/re-written once netCDF4 is released. */
if(REDEFINED_RECORD_DIMENSION){
if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stdout,"%s: INFO Requested re-order will change record dimension from %s to %s. netCDF allows only one record dimension. Hence %s will make %s record (least rapidly varying) dimension in all variables that contain it.\n",nco_prg_nm,rec_dmn_nm_in,rec_dmn_nm_out,nco_prg_nm,rec_dmn_nm_out);
/* Changing record dimension may invalidate is_rec_var flag
Updating is_rec_var flag to correct value, even if value is ignored, helps keep user apprised of unexpected dimension re-orders.
is_rec_var may change both for "fixed" and "processed" variables When is_rec_var changes for processed variables, may also need to change ancillary information and to check for duplicate dimensions. Ancillary information (dmn_idx_out_in) is available only for var_prc! Hence must update is_rec_var flag for var_fix and var_prc separately */ /* Update is_rec_var flag for var_fix */ for(idx=0;idx<nbr_var_fix;idx++){ /* Search all dimensions in variable for new record dimension */ for(dmn_out_idx=0;dmn_out_idx<var_fix[idx]->nbr_dim;dmn_out_idx++) if(!strcmp(var_fix[idx]->dim[dmn_out_idx]->nm,rec_dmn_nm_out)) break; /* ...Will variable be record variable in output file?... */ if(dmn_out_idx == var_fix[idx]->nbr_dim){ /* ...No. Variable will be non-record---does this change its status?... */ if(nco_dbg_lvl >= nco_dbg_var) if(var_fix[idx]->is_rec_var == True) (void)fprintf(fp_stdout,"%s: INFO Requested re-order will change variable %s from record to non-record variable\n",nco_prg_nm,var_fix[idx]->nm); /* Assign record flag dictated by re-order */ var_fix[idx]->is_rec_var=False; }else{ /* ...otherwise variable will be record variable... */ /* ...Yes. Variable will be record... */ /* ...Will becoming record variable change its status?... */ if(var_fix[idx]->is_rec_var == False){ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO Requested re-order will change variable %s from non-record to record variable\n",nco_prg_nm,var_fix[idx]->nm); /* Change record flag to status dictated by re-order */ var_fix[idx]->is_rec_var=True; } /* endif status changing from non-record to record */ } /* endif variable will be record variable */ } /* end loop over var_fix */ /* Update is_rec_var flag for var_prc */ for(idx=0;idx<nbr_var_prc;idx++){ /* Search all dimensions in variable for new record dimension */ for(dmn_out_idx=0;dmn_out_idx<var_prc_out[idx]->nbr_dim;dmn_out_idx++) if(!strcmp(var_prc_out[idx]->dim[dmn_out_idx]->nm,rec_dmn_nm_out)) break; /* ...Will variable be record variable in output file?... */ if(dmn_out_idx == var_prc_out[idx]->nbr_dim){ /* ...No. Variable will be non-record---does this change its status?... */ if(nco_dbg_lvl >= nco_dbg_var) if(var_prc_out[idx]->is_rec_var == True) (void)fprintf(fp_stdout,"%s: INFO Requested re-order will change variable %s from record to non-record variable\n",nco_prg_nm,var_prc_out[idx]->nm); /* Assign record flag dictated by re-order */ var_prc_out[idx]->is_rec_var=False; }else{ /* ...otherwise variable will be record variable... */ /* ...Yes. Variable will be record... */ /* ...must ensure new record dimension is not duplicate dimension... */ if(var_prc_out[idx]->has_dpl_dmn){ int dmn_dpl_idx; for(dmn_dpl_idx=1;dmn_dpl_idx<var_prc_out[idx]->nbr_dim;dmn_dpl_idx++){ /* NB: loop starts from 1 */ if(var_prc_out[idx]->dmn_id[0] == var_prc_out[idx]->dmn_id[dmn_dpl_idx]){ (void)fprintf(stdout,"%s: ERROR Requested re-order turns duplicate non-record dimension %s in variable %s into output record dimension. netCDF does not support duplicate record dimensions in a single variable.\n%s: HINT: Exclude variable %s from extraction list with \"-x -v %s\".\n",nco_prg_nm_get(),rec_dmn_nm_out,var_prc_out[idx]->nm,nco_prg_nm_get(),var_prc_out[idx]->nm,var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* endif err */ } /* end loop over dmn_out */ } /* endif has_dpl_dmn */ /* ...Will becoming record variable change its status?... 
*/ if(var_prc_out[idx]->is_rec_var == False){ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO Requested re-order will change variable %s from non-record to record variable\n",nco_prg_nm,var_prc_out[idx]->nm); /* Change record flag to status dictated by re-order */ var_prc_out[idx]->is_rec_var=True; /* ...Swap dimension information for multi-dimensional variables... */ if(var_prc_out[idx]->nbr_dim > 1){ /* Swap dimension information when turning multi-dimensional non-record variable into record variable. Single dimensional non-record variables that turn into record variables already have correct dimension information */ dmn_sct *dmn_swp; /* [sct] Dimension structure for swapping */ int dmn_idx_rec_in; /* [idx] Record dimension index in input variable */ int dmn_idx_rec_out; /* [idx] Record dimension index in output variable */ int dmn_idx_swp; /* [idx] Dimension index for swapping */ /* If necessary, swap new record dimension to first position */ /* Label indices with standard names */ dmn_idx_rec_in=dmn_out_idx; dmn_idx_rec_out=0; /* Swap indices in map */ dmn_idx_swp=dmn_idx_out_in[idx][dmn_idx_rec_out]; dmn_idx_out_in[idx][dmn_idx_rec_out]=dmn_idx_rec_in; dmn_idx_out_in[idx][dmn_idx_rec_in]=dmn_idx_swp; /* Swap dimensions in list */ dmn_swp=var_prc_out[idx]->dim[dmn_idx_rec_out]; var_prc_out[idx]->dim[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_in]; var_prc_out[idx]->dim[dmn_idx_rec_in]=dmn_swp; /* NB: Change dmn_id,cnt,srt,end,srd together to minimize chances of forgetting one */ /* Correct output variable structure copy of output record dimension information */ var_prc_out[idx]->dmn_id[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_out]->id; var_prc_out[idx]->cnt[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_out]->cnt; var_prc_out[idx]->srt[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_out]->srt; var_prc_out[idx]->end[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_out]->end; var_prc_out[idx]->srd[dmn_idx_rec_out]=var_prc_out[idx]->dim[dmn_idx_rec_out]->srd; /* Correct output variable structure copy of input record dimension information */ var_prc_out[idx]->dmn_id[dmn_idx_rec_in]=var_prc_out[idx]->dim[dmn_idx_rec_in]->id; var_prc_out[idx]->cnt[dmn_idx_rec_in]=var_prc_out[idx]->dim[dmn_idx_rec_in]->cnt; var_prc_out[idx]->srt[dmn_idx_rec_in]=var_prc_out[idx]->dim[dmn_idx_rec_in]->srt; var_prc_out[idx]->end[dmn_idx_rec_in]=var_prc_out[idx]->dim[dmn_idx_rec_in]->end; var_prc_out[idx]->srd[dmn_idx_rec_in]=var_prc_out[idx]->dim[dmn_idx_rec_in]->srd; } /* endif multi-dimensional */ } /* endif status changing from non-record to record */ } /* endif variable will be record variable */ } /* end loop over var_prc */ } /* !REDEFINED_RECORD_DIMENSION */ #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* Defining dimension in output file done by Manager alone */ #endif /* !ENABLE_MPI */ /* Once new record dimension, if any, is known, define dimensions in output file */ (void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_out); #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* Alter metadata for variables that will be packed */ if(nco_pck_plc != nco_pck_plc_nil){ if(nco_pck_plc != nco_pck_plc_upk){ /* Allocate attribute list container for maximum number of entries */ aed_lst_add_fst=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct)); aed_lst_scl_fct=(aed_sct *)nco_malloc(nbr_var_prc*sizeof(aed_sct)); } /* endif packing */ for(idx=0;idx<nbr_var_prc;idx++){ nco_pck_mtd(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc); if(nco_pck_plc != 
nco_pck_plc_upk){ /* Use same copy of attribute name for all edits */ aed_lst_add_fst[idx].att_nm=add_fst_sng; aed_lst_scl_fct[idx].att_nm=scl_fct_sng; } /* endif packing */ } /* end loop over var_prc */ } /* nco_pck_plc == nco_pck_plc_nil */ #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ #endif /* !ENABLE_MPI */ /* Define variables in output file, copy their attributes */ (void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_map,nco_pck_plc,dfl_lvl); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ /* Manager obtains output filename and broadcasts to workers */ if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp); MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,MPI_COMM_WORLD); if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char)); MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,MPI_COMM_WORLD); #endif /* !ENABLE_MPI */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ TKN_WRT_FREE=False; #endif /* !ENABLE_MPI */ /* Copy variable data for non-processed variables */ (void)nco_msa_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix,lmt_all_lst,nbr_dmn_fl); #ifdef ENABLE_MPI /* Close output file so workers can open it */ nco_close(out_id); TKN_WRT_FREE=True; } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* Close first input netCDF file */ nco_close(in_id); /* Loop over input files (not currently used, fl_nbr == 1) */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\nInput file %d is %s; ",fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"local file %s:\n",fl_in); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ /* Compensate for incrementing on each worker's first message */ var_wrt_nbr=-prc_nbr+1; idx=0; /* While variables remain to be processed or written... 
*/ while(var_wrt_nbr < nbr_var_prc){ /* Receive message from any worker */ MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt); /* Obtain MPI message tag type */ msg_tag_typ=mpi_stt.MPI_TAG; /* Get sender's prc_rnk */ rnk_wrk=wrk_id_bfr[0]; /* Allocate next variable, if any, to worker */ if(msg_tag_typ == msg_tag_wrk_rqs){ var_wrt_nbr++; /* [nbr] Number of variables written */ /* Worker closed output file before sending msg_tag_wrk_rqs */ TKN_WRT_FREE=True; if(idx > nbr_var_prc-1){ msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */ msg_bfr[1]=out_id; /* Output file ID */ }else{ /* Tell requesting worker to allocate space for next variable */ msg_bfr[0]=idx; /* [idx] Variable to be processed */ msg_bfr[1]=out_id; /* Output file ID */ msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */ /* Point to next variable on list */ idx++; } /* endif idx */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD); /* msg_tag_typ != msg_tag_wrk_rqs */ }else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){ /* Allocate token if free, else ask worker to try later */ if(TKN_WRT_FREE){ TKN_WRT_FREE=False; msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */ }else{ msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */ } /* !TKN_WRT_FREE */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* msg_tag_typ != msg_tag_tkn_wrt_rqs */ } /* end while var_wrt_nbr < nbr_var_prc */ }else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */ wrk_id_bfr[0]=prc_rnk; while(1){ /* While work remains... */ /* Send msg_tag_wrk_rqs */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD); /* Receive msg_tag_wrk_rsp */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt); idx=msg_bfr[0]; out_id=msg_bfr[1]; if(idx == idx_all_wrk_ass) break; else{ lcl_idx_lst[lcl_nbr_var]=idx; /* storing the indices for subsequent processing by the worker */ lcl_nbr_var++; var_prc_out[idx]->id=msg_bfr[2]; /* Process this variable same as UP code */ #if 0 /* NB: Immediately preceding MPI else scope confounds Emacs indentation Fake end scope restores correct indentation, simplifies code-checking */ } /* fake end else */ #endif /* !0 */ #else /* !ENABLE_MPI */ #ifdef _OPENMP #pragma omp parallel for default(none) private(idx,in_id) shared(aed_lst_add_fst,aed_lst_scl_fct,nco_dbg_lvl,dmn_idx_out_in,dmn_rdr_nbr,dmn_rvr_in,in_id_arr,nbr_var_prc,nco_pck_map,nco_pck_plc,out_id,nco_prg_nm,rcd,var_prc,var_prc_out) #endif /* !_OPENMP */ /* UP and SMP codes main loop over variables */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ #endif /* ENABLE_MPI */ in_id=in_id_arr[omp_get_thread_num()]; /* fxm TODO nco638 temporary fix? 
*/
var_prc[idx]->nc_id=in_id;
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_msa_var_get(in_id,var_prc[idx],lmt_all_lst,nbr_dmn_fl);
if(dmn_rdr_nbr > 0){
if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){
(void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm);
nco_exit(EXIT_FAILURE);
} /* endif err */
/* Change dimensionality of values */
rcd=nco_var_dmn_rdr_val(var_prc[idx],var_prc_out[idx],dmn_idx_out_in[idx],dmn_rvr_in[idx]);
/* Re-ordering required two value buffers, time to free input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
/* Free current dimension correspondence */
dmn_idx_out_in[idx]=nco_free(dmn_idx_out_in[idx]);
dmn_rvr_in[idx]=nco_free(dmn_rvr_in[idx]);
} /* endif dmn_rdr_nbr > 0 */
if(nco_pck_plc != nco_pck_plc_nil){
/* Copy input variable buffer to processed variable buffer */
/* fxm: this is dangerous and leads to double free()'ing variable buffer */
var_prc_out[idx]->val=var_prc[idx]->val;
/* (Un-)Pack variable according to packing specification */
nco_pck_val(var_prc[idx],var_prc_out[idx],nco_pck_map,nco_pck_plc,aed_lst_add_fst+idx,aed_lst_scl_fct+idx);
} /* endif nco_pck_plc != nco_pck_plc_nil */
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
{ /* begin OpenMP critical */
/* Common code for UP, SMP, and MPI */
/* Copy variable to output file then free value buffer */
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is array */
} /* end OpenMP critical */
/* Free current output buffer */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end else !idx_all_wrk_ass */
} /* end while loop
requesting work/token */ } /* endif Worker */ #else /* !ENABLE_MPI */ } /* end (OpenMP parallel for) loop over idx */ #endif /* !ENABLE_MPI */ if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stderr,"\n"); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); if(prc_rnk == rnk_mgr) { /* Manager only */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* ! prc_rnk == rnk_mgr */ else{ /* Workers */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt); #endif /* !ENABLE_MPI */ /* Write/overwrite packing attributes for newly packed and re-packed variables Logic here should nearly mimic logic in nco_var_dfn() */ if(nco_pck_plc != nco_pck_plc_nil && nco_pck_plc != nco_pck_plc_upk){ nco_bool nco_pck_plc_alw; /* [flg] Packing policy allows packing nc_typ_in */ /* ...put file in define mode to allow metadata writing... */ if(RAM_OPEN) md_open=NC_WRITE|NC_DISKLESS; else md_open=NC_WRITE; rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id); (void)nco_redef(out_id); /* ...loop through all variables that may have been packed... */ #ifdef ENABLE_MPI for(jdx=0;jdx<lcl_nbr_var;jdx++){ idx=lcl_idx_lst[jdx]; #else /* !ENABLE_MPI */ for(idx=0;idx<nbr_var_prc;idx++){ #endif /* !ENABLE_MPI */ /* nco_var_dfn() pre-defined dummy packing attributes in output file only for input variables considered "packable" */ if((nco_pck_plc_alw=nco_pck_plc_typ_get(nco_pck_map,var_prc[idx]->typ_upk,(nc_type *)NULL))){ /* Verify input variable was newly packed by this operator Writing pre-existing (non-re-packed) attributes here would fail because nco_pck_dsk_inq() never fills in var->scl_fct.vp and var->add_fst.vp Logic is same as in nco_var_dfn() (except var_prc[] instead of var[]) If operator newly packed this particular variable... */ if( /* ...either because operator newly packs all variables... */ (nco_pck_plc == nco_pck_plc_all_new_att) || /* ...or because operator newly packs un-packed variables like this one... */ (nco_pck_plc == nco_pck_plc_all_xst_att && !var_prc[idx]->pck_ram) || /* ...or because operator re-packs packed variables like this one... 
*/ (nco_pck_plc == nco_pck_plc_xst_new_att && var_prc[idx]->pck_ram) ){ /* Replace dummy packing attributes with final values, or delete them */ if(nco_dbg_lvl >= nco_dbg_io) (void)fprintf(stderr,"%s: main() replacing dummy packing attribute values for variable %s\n",nco_prg_nm,var_prc[idx]->nm); (void)nco_aed_prc(out_id,aed_lst_add_fst[idx].id,aed_lst_add_fst[idx]); (void)nco_aed_prc(out_id,aed_lst_scl_fct[idx].id,aed_lst_scl_fct[idx]); } /* endif variable is newly packed by this operator */ } /* endif nco_pck_plc_alw */ } /* end loop over var_prc */ (void)nco_enddef(out_id); #ifdef ENABLE_MPI nco_close(out_id); #endif /* !ENABLE_MPI */ } /* nco_pck_plc == nco_pck_plc_nil || nco_pck_plc == nco_pck_plc_upk */ #ifdef ENABLE_MPI if(prc_rnk == prc_nbr-1) /* Send Token to Manager */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); else MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* !Workers */ if(prc_rnk == rnk_mgr){ /* Only Manager */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_nbr-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt); } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); } /* end loop over fl_idx */ #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); /* Manager moves output file (closed by workers) from temporary to permanent location */ if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out); #else /* !ENABLE_MPI */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); #endif /* end !ENABLE_MPI */ /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncpdq-specific memory cleanup */ if(dmn_rdr_nbr > 0){ /* Free dimension correspondence list */ for(idx=0;idx<nbr_var_prc;idx++){ dmn_idx_out_in[idx]=(int *)nco_free(dmn_idx_out_in[idx]); dmn_rvr_in[idx]=(nco_bool *)nco_free(dmn_rvr_in[idx]); } /* end loop over idx */ if(dmn_idx_out_in) dmn_idx_out_in=(int **)nco_free(dmn_idx_out_in); if(dmn_rvr_in) dmn_rvr_in=(nco_bool **)nco_free(dmn_rvr_in); if(dmn_rvr_rdr) dmn_rvr_rdr=(nco_bool *)nco_free(dmn_rvr_rdr); if(dmn_rdr_nbr_in > 0) dmn_rdr_lst_in=nco_sng_lst_free(dmn_rdr_lst_in,dmn_rdr_nbr_in); /* Free dimension list pointers */ dmn_rdr=(dmn_sct **)nco_free(dmn_rdr); /* Dimension structures in dmn_rdr are owned by dmn and dmn_out, free'd later */ } /* endif dmn_rdr_nbr > 0 */ if(nco_pck_plc != nco_pck_plc_nil){ if(nco_pck_plc_sng) nco_pck_plc_sng=(char *)nco_free(nco_pck_plc_sng); if(nco_pck_map_sng) nco_pck_map_sng=(char *)nco_free(nco_pck_map_sng); if(nco_pck_plc != nco_pck_plc_upk){ /* No need for loop over var_prc variables to free attribute values Variable structures and attribute edit lists share same attribute values Free them only once, and do it in nco_var_free() */ aed_lst_add_fst=(aed_sct *)nco_free(aed_lst_add_fst); aed_lst_scl_fct=(aed_sct *)nco_free(aed_lst_scl_fct); } /* nco_pck_plc == nco_pck_plc_upk */ } /* nco_pck_plc == nco_pck_plc_nil */ /* NB: lmt now referenced within lmt_all_lst[idx] */ for(idx=0;idx<nbr_dmn_fl;idx++) for(jdx=0;jdx< lmt_all_lst[idx]->lmt_dmn_nbr;jdx++) lmt_all_lst[idx]->lmt_dmn[jdx]=nco_lmt_free(lmt_all_lst[idx]->lmt_dmn[jdx]); if(nbr_dmn_fl > 0) lmt_all_lst=nco_lmt_all_lst_free(lmt_all_lst,nbr_dmn_fl); lmt=(lmt_sct**)nco_free(lmt); /* NCO-generic clean-up */ /* Free individual strings/arrays */ if(cmd_ln) 
cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); /* Free lists of strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); if(aux_nbr > 0) aux=(lmt_sct **)nco_free(aux); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr); var_prc=(var_sct **)nco_free(var_prc); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix=(var_sct **)nco_free(var_fix); var_fix_out=(var_sct **)nco_free(var_fix_out); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
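/* Illustrative usage (a hedged sketch, not part of the original source): the option parsing and the
   re-order/pack mutual exclusion enforced above correspond to invocations such as the following;
   file names are hypothetical.

   ncpdq -a lat,-time in.nc out.nc   # re-order dimensions; leading '-' reverses time (see dmn_rvr_rdr above)
   ncpdq -P all_new in.nc out.nc     # pack all packable variables (default map is nco_pck_map_flt_sht)
   ncpdq -U in.nc out.nc             # unpack a previously packed file (parsed above as -P upk)

   Combining -a (re-order) with -P (pack) in one invocation is rejected above with a hint to run
   the operator twice. */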
ElementWiseExtremumLayer.h
// Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved. // Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE // WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #ifndef ELEMENT_WISE_EXTREMUM_LAYER_H #define ELEMENT_WISE_EXTREMUM_LAYER_H #include <training/base/common/Common.h> #include <training/base/layers/BroadcastingLayer.h> #include <type_traits> namespace { struct ElementWiseMaxHelper { static bool compare(const raul::dtype x, const raul::dtype y) { return y >= x; } static raul::dtype getBound() { return -std::numeric_limits<raul::dtype>::infinity(); } }; struct ElementWiseMinHelper { static bool compare(const raul::dtype x, const raul::dtype y) { return y <= x; } static raul::dtype getBound() { return std::numeric_limits<raul::dtype>::infinity(); } }; } namespace raul { /** * @brief Template for ElementWiseMax and ElementWiseMin layers * @tparam T */ template<typename T> class ElementWiseExtremumLayer : public BroadcastingLayer { public: ElementWiseExtremumLayer(const Name& name, const ElementWiseLayerParams& params, NetworkParameters& networkParameters); ElementWiseExtremumLayer(ElementWiseExtremumLayer&&) = default; ElementWiseExtremumLayer(const ElementWiseExtremumLayer&) = delete; ElementWiseExtremumLayer& operator=(const ElementWiseExtremumLayer&) = delete; void forwardComputeImpl(NetworkMode mode) override; void backwardComputeImpl() override; private: bool mBroadcast; std::vector<size_t> mIndexes; T mComparator; Name mTmpBufferName; size_t mDepth; size_t mHeight; size_t mWidth; }; template<typename T> ElementWiseExtremumLayer<T>::ElementWiseExtremumLayer(const Name& name, const ElementWiseLayerParams& params, NetworkParameters& networkParameters) : BroadcastingLayer(name, "ElementWiseExtremum", params, networkParameters) , mBroadcast(params.mBroadcast) , mTmpBufferName("TempStorageForIntermediateCalculations") { for (const auto& input : mInputs) { mNetworkParams.mWorkflow.copyDeclaration(name, input, raul::Workflow::Usage::Forward, raul::Workflow::Mode::Read); mNetworkParams.mWorkflow.copyDeclaration(name, input, input.grad(), DEC_BACK_WRIT_ZERO); } shape outputShape{ 1u, mNetworkParams.mWorkflow.getDepth(mInputs[0]), mNetworkParams.mWorkflow.getHeight(mInputs[0]), mNetworkParams.mWorkflow.getWidth(mInputs[0]) }; if (mBroadcast) { for (size_t j = 1; j < mInputs.size(); ++j) { shape inputShape{ 1u, mNetworkParams.mWorkflow.getDepth(mInputs[j]), mNetworkParams.mWorkflow.getHeight(mInputs[j]), mNetworkParams.mWorkflow.getWidth(mInputs[j]) }; std::transform(inputShape.begin(), inputShape.end(), outputShape.begin(), outputShape.begin(), 
[](auto a, auto b) { return std::max(a, b); }); } } mNetworkParams.mWorkflow.tensorNeeded(name, mOutputs[0], raul::WShape{ BS(), outputShape[1], outputShape[2], outputShape[3] }, DEC_FORW_WRIT); mNetworkParams.mWorkflow.copyDeclaration(name, mOutputs[0], mOutputs[0].grad(), DEC_BACK_READ); mDepth = mNetworkParams.mWorkflow.getDepth(mOutputs[0]); mHeight = mNetworkParams.mWorkflow.getHeight(mOutputs[0]); mWidth = mNetworkParams.mWorkflow.getWidth(mOutputs[0]); } template<typename T> void ElementWiseExtremumLayer<T>::forwardComputeImpl(NetworkMode) { determineBroadcastFlags(); if (!mBroadcast && std::any_of(mBroadcastQuery.begin(), mBroadcastQuery.end(), [](const auto& needToBroadcast) { return needToBroadcast; })) { THROW("ElementWiseExtremumLayer", mName, "input size mismatch"); } if (mNetworkParams.mWorkflow.getExecutionTarget() == ExecutionTarget::CPU) { Tensor& output = mNetworkParams.mMemoryManager[mOutputs[0]]; std::fill(output.begin(), output.end(), mComparator.getBound()); mIndexes.resize(output.size()); // Comparison for (size_t q = 0; q < mInputs.size(); ++q) { const Tensor& input = mNetworkParams.mMemoryManager[mInputs[q]]; if (mBroadcastQuery[q]) { auto input_viewer = input.getBroadcastedViewer(output.getShape()); #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t i = 0; i < output.size(); ++i) { if (mComparator.compare(output[i], input_viewer[i])) { output[i] = input_viewer[i]; mIndexes[i] = q; } } } else { #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t i = 0; i < output.size(); ++i) { if (mComparator.compare(output[i], input[i])) { output[i] = input[i]; mIndexes[i] = q; } } } } } else { THROW(mTypeName, mName, "unsupported execution target"); } } template<typename T> void ElementWiseExtremumLayer<T>::backwardComputeImpl() { if (mNetworkParams.mWorkflow.getExecutionTarget() == ExecutionTarget::CPU) { const Tensor& delta = mNetworkParams.mMemoryManager[mOutputs[0].grad()]; for (size_t i = 0; i < mInputs.size(); i++) { const auto input_name = mInputs[i]; // if (mNetworkParams.isGradNeeded(input_name)) { auto& in_nabla_tensor = mNetworkParams.mMemoryManager[input_name.grad()]; if (mBroadcastQuery[i]) { auto in_nabla = in_nabla_tensor.getBroadcastedViewer(delta.getShape()); for (size_t j = 0; j < mIndexes.size(); j++) { if (mIndexes[j] == i) { in_nabla[j] += delta[j]; } } } else { #if defined(_OPENMP) #pragma omp parallel for #endif for (size_t j = 0; j < mIndexes.size(); j++) { if (mIndexes[j] == i) { in_nabla_tensor[j] += delta[j]; } } } } } } else { THROW(mTypeName, mName, "unsupported execution target"); } } } // raul namespace #endif
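// Hedged sketch (not part of the original header): the forward/backward contract implemented
// above, reduced to plain std::vector for two equally shaped inputs. It shows why mIndexes is
// recorded during forward: backward routes the incoming gradient only to the input that supplied
// the extremum, and ties go to the later input because compare() uses >= (<= for the min helper).
//
// #include <cstddef>
// #include <vector>
//
// void max_forward(const std::vector<float>& a, const std::vector<float>& b,
//                  std::vector<float>& out, std::vector<std::size_t>& winner)
// {
//     out.resize(a.size());
//     winner.resize(a.size());
//     for (std::size_t i = 0; i < a.size(); ++i)
//     {
//         out[i] = a[i];
//         winner[i] = 0;                                        // input 0 is the provisional winner
//         if (b[i] >= out[i]) { out[i] = b[i]; winner[i] = 1; } // later input wins ties
//     }
// }
//
// void max_backward(const std::vector<float>& delta, const std::vector<std::size_t>& winner,
//                   std::vector<float>& grad_a, std::vector<float>& grad_b)
// {
//     for (std::size_t i = 0; i < delta.size(); ++i)
//         (winner[i] == 0 ? grad_a : grad_b)[i] += delta[i];    // subgradient: winner takes all
// }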
manage_rkhevi.c
/* This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license. GitHub repository: https://github.com/OpenNWP/GAME */

/* This file manages the RKHEVI time stepping. */

#include <stdlib.h>
#include <stdio.h>
#include <math.h> /* fmod() */
#include <geos95.h>
#include "../game_types.h"
#include "../spatial_operators/spatial_operators.h"
#include "time_stepping.h"
#include "../radiation/radiation.h"
#include "../thermodynamics/thermodynamics.h"
#include "../io/io.h"

int manage_rkhevi(State *state_old, State *state_new, Soil *soil, Grid *grid, Dualgrid *dualgrid, State *state_tendency, Diagnostics *diagnostics, Forcings *forcings, Irreversible_quantities *irrev, Config *config, double delta_t, double time_coordinate, int total_step_counter)
{
	// slow terms (diffusion) update switch
	int slow_update_bool = 0;
	// check if slow terms have to be updated
	if (fmod(total_step_counter, config -> slow_fast_ratio) == 0)
	{
		// setting the respective update switch to one
		slow_update_bool = 1;
	}

	// diagnosing the temperature
	temperature_diagnostics(state_old, grid, diagnostics);

	// interaction with soil
	if (config -> soil_on == 1)
	{
		soil_interaction(state_old, soil, diagnostics, forcings, grid, delta_t);
	}

	/* Loop over the RK substeps. */
	int vector_index;
	for (int rk_step = 0; rk_step < 2; ++rk_step)
	{
		/* state_old remains unchanged the whole time. At rk_step == 0, state_new is identical to state_old. */

		// 1.) Explicit component of the momentum equation.
		// ------------------------------------------------
		// Update of the pressure gradient.
		if (rk_step == 0)
		{
			manage_pressure_gradient(state_new, grid, dualgrid, diagnostics, forcings, irrev, config);
		}
		// Only the horizontal momentum is a forward tendency.
		vector_tendencies_expl(state_new, state_tendency, grid, dualgrid, diagnostics, forcings, irrev, config, slow_update_bool, rk_step, delta_t);
		// time stepping for the horizontal momentum can be directly executed
		#pragma omp parallel for private(vector_index)
		for (int h_index = 0; h_index < NO_OF_VECTORS_H; ++h_index)
		{
			for (int layer_index = 0; layer_index < NO_OF_LAYERS; ++layer_index)
			{
				vector_index = NO_OF_SCALARS_H + layer_index*NO_OF_VECTORS_PER_LAYER + h_index;
				state_new -> wind[vector_index] = state_old -> wind[vector_index] + delta_t*state_tendency -> wind[vector_index];
			}
		}
		// Horizontal velocity can be considered to be updated from now on.

		// 2.) Explicit component of the generalized density equations.
		// ------------------------------------------------------------
		// Radiation is updated here.
		if (config -> rad_on > 0 && config -> rad_update == 1 && rk_step == 0)
		{
			call_radiation(state_old, soil, grid, dualgrid, state_tendency, diagnostics, forcings, irrev, config, delta_t, time_coordinate);
		}
		scalar_tendencies_expl(state_old, state_new, state_tendency, soil, grid, delta_t, diagnostics, forcings, irrev, config, rk_step, slow_update_bool);

		// 3.) Vertical sound wave solver.
		// -------------------------------
		three_band_solver_ver_waves(state_old, state_new, state_tendency, diagnostics, config, delta_t, grid, rk_step);

		// 4.) Solving the implicit component of the generalized density equations for tracers.
		// ------------------------------------------------------------------------------------
		if (NO_OF_CONSTITUENTS > 1)
		{
			three_band_solver_gen_densitites(state_old, state_new, state_tendency, diagnostics, config, delta_t, grid);
		}

		// At the second RK step slow terms are never updated.
slow_update_bool = 0; } // saturation adjustment, calculation of latent heating rates moisturizer(state_new, delta_t, diagnostics, irrev, config, grid, soil); return 0; }
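The slow_update_bool switch above fires once every config -> slow_fast_ratio steps. A tiny standalone check of that cadence (hypothetical ratio of 3, integer modulo in place of fmod):

#include <cstdio>

int main()
{
    // Mirror of the fmod(total_step_counter, slow_fast_ratio) == 0 test above.
    const int slow_fast_ratio = 3; // hypothetical value
    for (int total_step_counter = 0; total_step_counter < 7; ++total_step_counter)
    {
        int slow_update_bool = (total_step_counter % slow_fast_ratio == 0) ? 1 : 0;
        printf("step %d: slow_update_bool = %d\n", total_step_counter, slow_update_bool);
    }
    // prints 1 at steps 0, 3 and 6, so the slow (diffusion) terms are
    // recomputed on every third step only
    return 0;
}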
filter.c
/* This program was modified (incrementally simplified) in 2013-2014 by Grigori Fursin
   to understand performance regressions for different images ... */

/* This program detects the edges in a 256 gray-level 128 x 128 pixel image.
   The program relies on a 2D-convolution routine to convolve the image with
   kernels (Sobel operators) that expose horizontal and vertical edge
   information.

   The following is a block diagram of the steps performed in edge detection,

                +---------+     +----------+
     Input      |Smoothing|     |Horizontal|-------+
     Image   -->| Filter  |---+-->| Gradient |      |
                +---------+   |  +----------+  +----x-----+   +---------+  Binary
                              |                | Gradient |   |  Apply  |  Edge
                              |                | Combining|-->|Threshold|->Detected
                              |  +----------+  +----x-----+   +----x----+  Output
                              |  | Vertical |       |              |
                              +-->| Gradient |-------+             |
                                 +----------+                 Threshold Value

   This program is based on the routines and algorithms found in the book
   "C Language Algorithms for Digital Signal Processing" by P.M. Embree and
   B. Kimble.

   Copyright (c) 1992 -- Mazen A.R. Saghir -- University of Toronto */

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* needed for strcmp() below */
/* #include "traps.h" */
#include "filter.h"

FILE *input_fp=NULL;
FILE *output_fp=NULL;

int input_dsp();             /* input_dsp (dest, char*) */
void output_dsp();           /* output_dsp (source) */
void filter_codelet_day();   /* day/night variants of the kernel */
void filter_codelet_night();

int image_buffer1[N][N];
int image_buffer2[N][N];
int image_buffer3[N][N];
int filter[K][K];

int main(int argc, const char **argv)
{
  int *matrix_ptr1;
  int *matrix_ptr2;
  int *matrix_ptr3;
  int *filter_ptr;
  int temp1;
  int temp2;
  int temp3;
  int v1;
  int v2;
  int v3;
  int i;
  int j;

  void convolve2d();

  long rr=0, r=1;
  int tm=0;
  char *stm;

  /* argv[1] is required, so argc must be at least 2 */
  if (argc<2)
  {
    printf("Usage: ./a.out <input file name with an image in a raw format>\n");
    return 1;
  }

  /* FGG adding kernel repetition */
  if (getenv("CT_REPEAT_MAIN")!=NULL) r=atol(getenv("CT_REPEAT_MAIN"));

  /* FGG adding "time of the day" feature */
  stm=getenv("CT_TIME");
  if (stm!=NULL)
  {
    if (strcmp(stm, "day")==0) tm=0;
    else if (strcmp(stm, "night")==0) tm=1;
  }

  /* Read input image. */
  input_dsp(image_buffer1, argv[1]);

  /* Initialize image_buffer2. */
  for (i = 0; i < N; i++) {
    for (j = 0; j < N; ++j) {
      image_buffer2[i][j] = 0;
    }
  }

  printf("Time of the day feature: %d\n",tm);

  /* The pragma must directly precede the loop it parallelizes (it previously
     sat in front of the printf above, which does not compile); note that all
     repetitions recompute the same result into image_buffer2. */
  #pragma omp parallel for
  for (rr=0; rr<r; rr++)
  {
    if (tm==0)
      filter_codelet_day(image_buffer1, image_buffer2);
    else
      filter_codelet_night(image_buffer1, image_buffer2);
  }

  /* Store binary image. */
  output_dsp(image_buffer2);

  return 0;
}

/* This function convolves the input image by the kernel and stores the result
   in the output image. */

void convolve2d(input_image, kernel, output_image)
int *input_image;
int *kernel;
int *output_image;
{
  int *kernel_ptr;
  int *input_image_ptr;
  int *output_image_ptr;
  int *kernel_offset;
  int *input_image_offset;
  int *output_image_offset;
  int i;
  int j;
  int c;
  int r;
  int row;
  int col;
  int normal_factor;
  int sum;
  int temp1;
  int temp2;
  int dead_rows;
  int dead_cols;

  /* Set the number of dead rows and columns. These represent the band of rows
     and columns around the edge of the image whose pixels must be formed from
     less than a full kernel-sized complement of input image pixels. No output
     values are computed for these dead rows and columns, since they would tend
     to have less than full amplitude values and would exhibit a "washed-out"
     look known as convolution edge effects. */

  dead_rows = K / 2;
  dead_cols = K / 2;

  /* Calculate the normalization factor of the kernel matrix. */

  normal_factor = 0;
  kernel_ptr = kernel;
  for (r = 0; r < K; r++)
  {
    kernel_offset = kernel_ptr;
    temp1 = *kernel_offset++;
    for (c = 1; c < K; c++)
    {
      normal_factor += abs(temp1);
      temp1 = *kernel_offset++;
    }
    normal_factor += abs(temp1);
    kernel_ptr += K;
  }

  if (normal_factor == 0)
    normal_factor = 1;

  /* Convolve the input image with the kernel. */
  row = 0;
  output_image_ptr = output_image;
  output_image_ptr += (N * dead_rows);
  for (r = 0; r < N - K + 1; r++)
  {
    output_image_offset = output_image_ptr;
    output_image_offset += dead_cols;
    col = 0;
    for (c = 0; c < N - K + 1; c++)
    {
      input_image_ptr = input_image;
      input_image_ptr += (N * row);
      kernel_ptr = kernel;
      sum = 0;
      for (i = 0; i < K; i++)
      {
        input_image_offset = input_image_ptr;
        input_image_offset += col;
        kernel_offset = kernel_ptr;
        temp1 = *input_image_offset++;
        temp2 = *kernel_offset++;
        for (j = 1; j < K; j++)
        {
          sum += temp1 * temp2;
          temp1 = *input_image_offset++;
          temp2 = *kernel_offset++;
        }
        sum += temp1 * temp2;
        kernel_ptr += K;
        input_image_ptr += N;
      }
      *output_image_offset++ = (sum / normal_factor);
      col++;
    }
    output_image_ptr += N;
    row++;
  }
}

int input_dsp (int *dest, char *fgg_file)
{
  int success;

  input_fp=fopen(fgg_file,"rb");
  if (input_fp==NULL)
  {
    printf ("Error: cannot open input image file %s ...\n", fgg_file);
    exit(1);
  }

  success=fread(dest, N, N*sizeof(int), input_fp);
  fclose(input_fp);

  return success;
}

void output_dsp (int *src)
{
  output_fp=fopen("image_output.bin","wb");
  fwrite(src, N, N*sizeof(int), output_fp);
  fclose(output_fp);
}
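Since the comment in convolve2d() explains the normalization factor and the dead-row border, here is a minimal standalone sketch that makes both concrete. The 3x3 Sobel kernel is a hypothetical example; the program's real kernels live in filter_codelet_day/night:

#include <cstdio>
#include <cstdlib>

int main()
{
    // Hypothetical horizontal Sobel kernel (K = 3).
    const int K = 3;
    int kernel[K][K] = { {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1} };

    // normal_factor = sum of |kernel entries|, which is 8 for this kernel,
    // computed exactly as the pointer-walking loop in convolve2d() does.
    int normal_factor = 0;
    for (int r = 0; r < K; r++)
        for (int c = 0; c < K; c++)
            normal_factor += abs(kernel[r][c]);
    if (normal_factor == 0) normal_factor = 1;

    // dead_rows = dead_cols = K/2 = 1: a 1-pixel border of the output
    // never receives a value, matching convolve2d() above.
    printf("normal_factor = %d, dead border = %d pixel(s)\n",
           normal_factor, K / 2);
    return 0;
}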
sph.h
#ifndef SPH_H
#define SPH_H

#include <omp.h>
#include <vector>
#include <cstdlib>
#include <cmath>
#include <glm/vec3.hpp>
#include <glm/mat3x3.hpp>
#include <glm/geometric.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtx/scalar_multiplication.hpp>

#include "sphparticle.h"
#include "sphmap.h"

#define LOG 0
#define SHOW_FPRES 0
#define SHOW_FVISC 0
#define PI 3.14159265358979323846264338327950288

using namespace std;
using namespace glm;

class SPHSystem {
private:
    SpatialHashTable* table;
    vector<Particle*> Ps;
    vec3 spos;
    vec3 size;
    vec3 gap;
    vec3 m_d;
    vec3 container_lb;
    vec3 container_ub;
    float k;
    float g;
    float density0;
    float supportRadius;
    float smoothingRadius;
    float penalty;
    float viscosity;

    // cubic spline kernel profile (3D normalization)
    float f(float q) {
        float ans;
        if (0 <= q && q < 1) ans = 2.0 / 3.0 - pow(q, 2) + 0.5 * pow(q, 3);
        else if (1 <= q && q < 2) ans = 1.0 / 6.0 * pow(2 - q, 3);
        else ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }

    float df(float q) {
        float ans;
        if (0 <= q && q < 1) ans = -2 * q + 3.0 / 2.0 * pow(q, 2);
        else if (1 <= q && q < 2) ans = -0.5 * pow(2 - q, 2);
        else ans = 0.0;
        return ans * 3.0 / 2.0 / PI;
    }

    float W(Particle* x_i, Particle* x_j, float smoothingRadius) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smoothingRadius;
        return 1.0 / pow(smoothingRadius, 3) * f(q);
    }

    vec3 dW(Particle* x_i, Particle* x_j, float smoothingRadius) {
        float q = distance(x_i->getPosition(), x_j->getPosition()) / smoothingRadius;
        vec3 norm = x_i->getPosition() - x_j->getPosition();
        norm = norm / sqrt(dot(norm, norm));
        return 1.0 / pow(smoothingRadius, 4) * df(q) * norm;
    }

public:
    SPHSystem(vec3 pos, vec3 size, vec3 gap, float m_d, vec3 container_lb, vec3 container_ub,
              float g, float penalty, float k, float density0, float supportRadius,
              float smoothingRadius, float viscosity)
        : spos(pos), size(size), gap(gap), m_d(m_d), container_lb(container_lb), container_ub(container_ub),
          g(g), penalty(penalty), k(k), density0(density0), supportRadius(supportRadius),
          smoothingRadius(smoothingRadius), viscosity(viscosity) {
        this->table = new SpatialHashTable(m_d);
        float mass = pow(supportRadius / 2.0, 3) * density0; // pow(2.0 / 3.0 * smoothingRadius, 3) * density0;
        vec3 center((size.x - 1) * gap.x / 2, (size.y - 1) * gap.y / 2, (size.z - 1) * gap.z / 2);
        for (int ix = 0; ix < size[0]; ix++) {
            for (int iy = 0; iy < size[1]; iy++) {
                for (int iz = 0; iz < size[2]; iz++) {
                    vec3 ppos(gap[0] * ix, gap[1] * iy, gap[2] * iz);
                    if (distance(ppos, center) > (size.x - 1) * gap.x / 2 + 0.1) continue;
                    // renamed from "rand": a local vec3 named rand would shadow
                    // std::rand() inside its own initializer and fail to compile
                    vec3 jitter(0.02 * rand() / RAND_MAX, 0.02 * rand() / RAND_MAX, 0.02 * rand() / RAND_MAX);
                    ppos = ppos + pos + jitter;
                    Ps.push_back(new Particle(ppos, mass));
                }
            }
        }
    }

    void build() {
        table->clear();
        table->build(Ps);
        for (int i = 0; i < Ps.size(); i++) computeNeighborBlocks(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++) computeNeighbors(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++) computeBasics(Ps[i]);
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++) computeForces(Ps[i]);
    }

    void computeNeighborBlocks(Particle* p) {
        table->neighborBlocks(p);
    }

    void computeNeighbors(Particle* p) {
        table->neighbors(p);
        p->setBuiltNeighborsFlag(true);
    }

    void computeBasics(Particle* p) {
        float mass = p->getMass();
        vec3 pos = p->getPosition();
        vector<Particle*>* neighbors = p->getNeighbors(true);
        // compute density & pressure
        float dens = 0;
#pragma omp parallel for reduction(+:dens)
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            dens += p_j->getMass() * W(p, p_j, smoothingRadius);
        }
        float pres = k * (pow(dens / density0, 7) - 1);
        p->setDensity(dens);
        p->setPressure(pres);
        p->setBuiltBasicsFlag(true);
    }

    void computeForces(Particle* p) {
        vec3 pos = p->getPosition();
        vec3 vel = p->getVelocity();
        float mass = p->getMass();
        float dens = p->getDensity();
        float pres = p->getPressure();
        vector<Particle*>* neighbors = p->getNeighbors(true);

        // -----------------------
        // compute F_pressure
        // -----------------------
        vec3 dPres(0, 0, 0);
        if (SHOW_FPRES) {
            vec3 tpos = p->getPosition();
            printf("stats of %.0f-%.0f-%.0f:\tmass: %.2f\tdensity: %.2f\tpressure: %.2f\n",
                   tpos.x, tpos.y, tpos.z, mass, dens, pres);
        }
        // note: no OpenMP pragma here; the += accumulation into dPres would
        // race, and build() already parallelizes over particles one level up
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            float mass_j = p_j->getMass();
            float dens_j = p_j->getDensity();
            float pres_j = p_j->getPressure();
            vec3 dW_ij = dW(p, p_j, smoothingRadius);
            vec3 dPres_j = mass_j * (pres / pow(dens, 2) + pres_j / pow(dens_j, 2)) * dW_ij;
            dPres += dPres_j;
            if (SHOW_FPRES) {
                vec3 tpos = p_j->getPosition();
                printf("\tstats of %.0f-%.0f-%.0f:\tmass: %.2f density: %.2f pressure: %.2f param: %.2f dW: %.2f-%.2f-%.2f dPres: %.2f-%.2f-%.2f\n",
                       tpos.x, tpos.y, tpos.z, mass_j, dens_j, pres_j,
                       mass_j * (pres / pow(dens, 2) + pres_j / pow(dens_j, 2)),
                       dW_ij.x, dW_ij.y, dW_ij.z,
                       dPres_j.x, dPres_j.y, dPres_j.z );
            }
        }
        vec3 Fpres = - mass * dPres;
        if (SHOW_FPRES) {
            printf("dpres: %.4f %.4f %.4f\n", dPres.x, dPres.y, dPres.z);
            printf("Fpres: %.4f %.4f %.4f\n", Fpres.x, Fpres.y, Fpres.z);
        }

        // -----------------------
        // compute F_viscosity
        // -----------------------
        vec3 ddVisc(0, 0, 0);
        if (SHOW_FVISC) {
            vec3 tpos = pos;
            printf("stats of %.0f-%.0f-%.0f:\tmass: %.2f\tvel: %.2f-%.2f-%.2f\n",
                   tpos.x, tpos.y, tpos.z, mass, vel.x, vel.y, vel.z);
        }
        // note: no OpenMP pragma here either, for the same reason as above
        for (int j = 0; j < neighbors->size(); j++) {
            Particle* p_j = neighbors->at(j);
            float mass_j = p_j->getMass();
            float dens_j = p_j->getDensity();
            vec3 x_ij = pos - p_j->getPosition();
            vec3 v_ij = vel - p_j->getVelocity();
            vec3 dW_ij = dW(p, p_j, smoothingRadius);
            vec3 ddVisc_j = mass_j / dens_j * v_ij * (dot(x_ij, dW_ij) / (dot(x_ij, x_ij) + 0.01 * pow(smoothingRadius, 2)));
            ddVisc += ddVisc_j;
            if (SHOW_FVISC) {
                vec3 tpos = p_j->getPosition();
                printf("\tstats of %.0f-%.0f-%.0f:\tmass: %.2f density: %.2f dW: %.2f-%.2f-%.2f ddVisc: %.2f-%.2f-%.2f\n",
                       tpos.x, tpos.y, tpos.z, mass_j, dens_j,
                       dW_ij.x, dW_ij.y, dW_ij.z,
                       ddVisc_j.x, ddVisc_j.y, ddVisc_j.z );
            }
        }
        vec3 Fvisc = 2 * mass * viscosity * ddVisc;
        if (SHOW_FVISC) {
            printf("ddVisc: %.4f %.4f %.4f\n", ddVisc.x, ddVisc.y, ddVisc.z);
            printf("Fvisc: %.4f %.4f %.4f\n", Fvisc.x, Fvisc.y, Fvisc.z);
        }

        p->setFpres(Fpres);
        p->setFvisc(Fvisc);
        p->setBuiltForcesFlag(true);
    }

    void applyGForces() {
        // renamed from "g": a local vec3 named g would shadow the member g
        // inside its own initializer and fail to compile
        vec3 gravity(0.0f, -g, 0.0f);
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++)
            Ps[i]->applyPermForce(Ps[i]->getMass() * gravity);
    }

    void applyTempForces(vec3& f) {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            Ps[i]->applyTempForce(f);
    }

    void clearTempForces() {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++)
            Ps[i]->clearTempForce();
    }

    void applySPHForces() {
#pragma omp parallel for
        for (int i = 0 ; i < Ps.size() ; i++) {
            Ps[i]->applyTempForce(Ps[i]->getFpres());
            Ps[i]->applyTempForce(Ps[i]->getFvisc());
        }
    }

    void setContainer(vec3 container_lb, vec3 container_ub) {
        this->container_lb = container_lb;
        this->container_ub = container_ub;
    }

    void applyPenaltyForces() {
#pragma omp parallel for
        for (int i = 0; i < Ps.size(); i++) {
            vec3 pos = Ps[i]->getPosition();
            if
(pos.y <= container_lb.y) { vec3 penalty_force(0, -penalty * (pos.y - container_lb.y), 0); Ps[i]->applyTempForce(penalty_force); } if (pos.y > container_ub.y) { vec3 penalty_force(0, -penalty * (pos.y - container_ub.y), 0); Ps[i]->applyTempForce(penalty_force); } if (pos.x <= container_lb.x) { vec3 penalty_force(-penalty * (pos.x - container_lb.x), 0, 0); Ps[i]->applyTempForce(penalty_force); } if (pos.x > container_ub.x) { vec3 penalty_force(-penalty * (pos.x - container_ub.x), 0, 0); Ps[i]->applyTempForce(penalty_force); } if (pos.z <= container_lb.z) { vec3 penalty_force(0, 0, -penalty * (pos.z - container_lb.z)); Ps[i]->applyTempForce(penalty_force); } if (pos.z > container_ub.z) { vec3 penalty_force(0, 0, -penalty * (pos.z - container_ub.z)); Ps[i]->applyTempForce(penalty_force); } } } int getSize() { return Ps.size(); } vector<Particle*>* getParticles() { return &Ps; } void getPositions(float* pos) { #pragma omp parallel for for (int i = 0 ; i < Ps.size() ; i++) { vec3 ppos = Ps[i]->getPosition(); for (int j = 0 ; j < 3 ; j++) pos[3 * i + j] = ppos[j]; } } void getVelocities(float* vel) { #pragma omp parallel for for (int i = 0 ; i < Ps.size() ; i++) { vec3 pvel = Ps[i]->getVelocity(); for (int j = 0 ; j < 3 ; j++) vel[3 * i + j] = pvel[j]; } } void getPosAcc(float* posacc) { #pragma omp parallel for for (int i = 0 ; i < Ps.size() ; i++) { vec3 ppos = Ps[i]->getPosition(); vec3 pacc = ppos + Ps[i]->computeTempAcceleration(); for (int j = 0 ; j < 3 ; j++) posacc[6 * i + 0 + j] = ppos[j]; for (int j = 0 ; j < 3 ; j++) posacc[6 * i + 3 + j] = pacc[j]; } } void printPositions() { printf("\n"); for (int i = 0 ; i < Ps.size() ; i++) { vec3 pos = Ps[i]->getPosition(); printf("%.3f-%.3f-%.3f ", pos[0], pos[1], pos[2]); } } }; #endif
cgbtrs.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrs.c, normal z -> c, Fri Sep 28 17:38:04 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gbtrs
 *
 *  Solves a system of linear equations A * X = B with the LU factorization
 *  of the band matrix A computed by plasma_cgbtrf.
 *
 *******************************************************************************
 *
 * @param[in] trans
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in] kl
 *          The number of subdiagonals within the band of A. kl >= 0.
 *
 * @param[in] ku
 *          The number of superdiagonals within the band of A. ku >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of
 *          columns of the matrix B. nrhs >= 0.
 *
 * @param[in,out] AB
 *          Details of the LU factorization of the band matrix A, as
 *          computed by plasma_cgbtrf.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB.
 *
 * @param[in] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cgbtrs
 * @sa plasma_cgbtrs
 * @sa plasma_dgbtrs
 * @sa plasma_sgbtrs
 * @sa plasma_cgbtrf
 *
 ******************************************************************************/
int plasma_cgbtrs(plasma_enum_t trans, int n, int kl, int ku, int nrhs,
                  plasma_complex32_t *pAB, int ldab, int *ipiv,
                  plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -3;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -4;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -5;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -7;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaComplexFloat, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    plasma_desc_t AB;
    plasma_desc_t B;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use cgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexFloat, PlasmaGeneral, nb, nb,
                                             lm, n, 0, 0, n, n, kl, ku, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgbtrs(trans, AB, ipiv, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_gbtrs
 *
 *  Solves a system of linear equations using a previously
 *  computed factorization.
 *  Non-blocking tile version of plasma_cgbtrs().
 *  May return before the computation is finished.
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] trans
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] AB
 *          Details of the LU factorization of the band matrix A, as
 *          computed by plasma_cgbtrf.
 *
 * @param[in] ipiv
 *          The pivot indices computed by plasma_cgbtrf.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cgbtrs
 * @sa plasma_omp_cgbtrs
 * @sa plasma_omp_dgbtrs
 * @sa plasma_omp_sgbtrs
 * @sa plasma_omp_cgbtrf
 *
 ******************************************************************************/
void plasma_omp_cgbtrs(plasma_enum_t trans, plasma_desc_t AB, int *ipiv, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((trans != PlasmaNoTrans) &&
        (trans != PlasmaTrans) &&
        (trans != PlasmaConjTrans)) {
        plasma_error("illegal value of trans");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    if (trans == PlasmaNoTrans) {
        plasma_pctbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                      1.0, AB, B, ipiv, sequence, request);
        plasma_pctbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                      1.0, AB, B, ipiv, sequence, request);
    }
    else {
        plasma_pctbsm(PlasmaLeft, PlasmaUpper, trans, PlasmaNonUnit,
                      1.0, AB, B, ipiv, sequence, request);
        plasma_pctbsm(PlasmaLeft, PlasmaLower, trans, PlasmaUnit,
                      1.0, AB, B, ipiv, sequence, request);
    }
}
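A minimal call-sequence sketch for the routine above. The plasma_cgbtrf prototype shown in the comments is an assumption by analogy with LAPACK's cgbtrf and the parameter docs above; consult plasma.h for the exact signatures before use:

#include <vector>
#include <complex>
// #include "plasma.h"   // real build: PLASMA headers plus -lplasma

// Sketch only: solve A*X = B for a band matrix with kl subdiagonals and
// ku superdiagonals. ldab >= 1 + 2*kl + ku leaves room for fill-in from
// pivoting, as in LAPACK band storage (assumed convention).
void solve_band_system_sketch(int n, int kl, int ku, int nrhs,
                              std::complex<float>* AB, int ldab,
                              std::complex<float>* B,  int ldb)
{
    std::vector<int> ipiv(n);
    // plasma_init();                                       // once per process
    // plasma_cgbtrf(n, n, kl, ku, AB, ldab, ipiv.data());  // factor (assumed signature)
    // plasma_cgbtrs(PlasmaNoTrans, n, kl, ku, nrhs,
    //               AB, ldab, ipiv.data(), B, ldb);        // solve, as defined above
    // plasma_finalize();
    (void)AB; (void)B; (void)nrhs; (void)ldb;               // placeholders in this sketch
}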
mgm.c
#include <stdio.h>
#include <math.h>
#include "utils.h"
#include "grid.h"
#include "intergrid.h"
#include "smoothers.h"

typedef enum {V,W,S} CYCLE;
typedef enum {JACOBI,CHEBYSHEV} METHOD;

void get_options( int *iters, int *nc, double *mineigen, int *lmax, int *nu1, int *nu2, int *nuc, CYCLE *cycle, METHOD *smoother, int *print_res );
double residual( GRID u, GRID f );
void print_errors( GRID u, GRID f, GRID uex, int it );
void print_header( double mineigen, int lmax, int nc, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother );
long long int defect( GRID u, GRID f, GRID d );
long long int smooth( METHOD smoother, GRID &u, GRID &f, GRID &w, int nu, double mineigen );
long long int mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother, double mineigen );
/*
void defect( GRID u, GRID f, GRID d );
void smooth( METHOD smoother, GRID &u, GRID &f, GRID &w, int nu, double mineigen );
void mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother, double mineigen );
*/

void get_options( int *iters, int *nc, double *mineigen, int *lmax, int *nu1, int *nu2, int *nuc, CYCLE *cycle, METHOD *smoother, int *print_res )
{
  char cyc[2],inp[2];

  /* prompts translated from Dutch */
  printf("\n\n# ITERATIVE METHODS for SOLVING the POISSON EQUATION\n\n");
  printf("\n# number of (interior) grid lines on the coarsest grid: ");
  scanf("%d",nc);
  printf("# number of additional fine grids: ");
  scanf("%d",lmax);
  printf("# multigrid cycle type (V), (W), (S): " );
  scanf("%1s",cyc);
  switch(cyc[0]) {
    case 'V': case 'v': *cycle=V; break;
    case 'W': case 'w': *cycle=W; break;
    case 'S': case 's': *cycle=S; break;
    default : print_msg("wrong cycle type");
  }
  printf("# multigrid smoother (J for Jacobi), (C for Chebyshev): " );
  scanf("%1s",cyc);
  switch(cyc[0]) {
    case 'J' : case 'j' : *smoother=JACOBI; break;
/*    case 'C' : case 'c' : { */
/*      *smoother=CHEBYSHEV; */
/*      printf("# l_min = c l_max, enter c: "); */
/*      scanf("%lf",mineigen); */
/*      break; */
/*    } */
    default : print_msg("wrong smoother type");
  }
  printf("# number of pre-smoothing steps: ");
  scanf("%d",nu1);
  printf("# number of post-smoothing steps: ");
  scanf("%d",nu2);
  printf("# number of coarse-grid relaxation steps: ");
  scanf("%d",nuc);
  printf("# number of iterations on the finest grid: ");
  scanf("%d",iters);
  printf("# measure timings? (y/n) : ");
  scanf("%1s", inp);
  switch(inp[0]) {
    case 'y': case 'Y': case 'j': case 'J': *print_res = 0; break; /* 'j'/'J' kept for the old Dutch prompt */
    case 'n': case 'N': *print_res = 1; break;
    default : print_msg("invalid input");
  }
}

double residual( GRID u, GRID f )
{
  if (u.n != f.n) print_msg("residual: wrong grid sizes");

  double tmp, res = 0.0;
  double invhh = 1.0/(u.h*u.h);
#pragma omp parallel for collapse(3) reduction(+:res) private(tmp)
  for (int i=1; i<=u.n; i++)
    for (int j=1; j<=u.n; j++)
      for (int k=1; k<=u.n; k++) {
        tmp = (u.p[i-1][j][k]+
               u.p[i+1][j][k]+
               u.p[i][j-1][k]+
               u.p[i][j+1][k]+
               u.p[i][j][k-1]+
               u.p[i][j][k+1]-
               6.0*u.p[i][j][k])*invhh-f.p[i][j][k];
        res += tmp*tmp;
      }
  res = sqrt(res)/u.n;
  return res;
}

double oldresid,olderr;

void print_errors( GRID u, GRID f, GRID uex, int it )
{
  double rhoresid,rhoerr;
  double resid = residual(u,f);
  double err = error(u,uex);
  if (it==0) {
    printf("\n# %dx%dx%d : %-3d : %20e %20e\n",u.n,u.n,u.n,it,err,resid);
  } else {
    rhoresid = (oldresid==0.0) ? 1.0 : resid/oldresid;
    rhoerr = (olderr ==0.0) ?
1.0 : err/olderr; printf("# %dx%dx%d : %-3d : %20e %20e : %6f %6f \n",u.n,u.n,u.n,it, err,resid,rhoerr,rhoresid); } oldresid = resid; olderr = err; } void print_header( double mineigen, int lmax, int nc, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother ) { char cyc[] = {'V','W','S'}; char* str[] = {"JACOBI", "CHEBYSHEV"}; printf("\n\n\n# %s smoother ", str[(int)smoother]); printf(": levels: 0..%-2d, coarse grid: %dx%dx%d, %c cycle, lambda_min= %f lambda_max\n",lmax,nc,nc,nc,cyc[(int)cycle],mineigen); printf("# pre-smoothing: %-2d, post-smoothing: %-2d, coarse relaxation: %-2d", nu1,nu2,nuc); printf("\n\n# discr iter error residual rho-error rho-residual\n" ); printf( "# ------ ---- ----- -------- --------- ------------\n" ); } //void defect( GRID u, GRID f, GRID d ) { long long int defect( GRID u, GRID f, GRID d ) { long long int count = 0; if (u.n != f.n || u.n != d.n) print_msg("residual: wrong grid sizes"); double invhh = 1.0/(u.h*u.h); #pragma omp parallel for collapse(3) for (int i=1; i<=u.n; i++) for (int j=1; j<=u.n; j++) for (int k=1; k<=u.n; k++) d.p[i][j][k]=f.p[i][j][k]-(6.0*u.p[i][j][k]- u.p[i-1][j][k]- u.p[i+1][j][k]- u.p[i][j-1][k]- u.p[i][j+1][k]- u.p[i][j][k-1]- u.p[i][j][k+1] )*invhh; // count++; return count; } //void smooth( METHOD smoother, GRID &u, GRID &f, GRID &w, int nu, double mineigen ) { long long int smooth( METHOD smoother, GRID &u, GRID &f, GRID &w, int nu, double mineigen ) { double lmax = 8.0*(u.n+1)*(u.n+1); double lmin = mineigen * lmax; long long int count = 0; //if (smoother == JACOBI) count = jacobi(u,f,w,nu); //else if (smoother == CHEBYSHEV) //chebyshev(u,f,w,nu,lmin,lmax); return count; } //void mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother, double mineigen ) { long long int mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle, METHOD smoother, double mineigen ) { long long int count = 0; if (l==0) { u[0].p[1][1][1] = (u[0].h)*(u[0].h) / 6.0 * f[0].p[1][1][1]; count += smooth(smoother,u[0],f[0],w[0],nuc,mineigen); } else { count += smooth(smoother,u[l],f[l],w[l],nu1,mineigen); count += defect(u[l],f[l],w[l]); count += restriction(w[l],f[l-1]); if (cycle == V) { zero_grid(u[l-1]); count += mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle,smoother,mineigen); } else if (cycle == W) { zero_grid(u[l-1]); count += mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle,smoother,mineigen); count += mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle,smoother,mineigen); } else if (cycle == S) { if (l==1) { zero_grid(u[l-1]); mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle,smoother,mineigen); } else { count += restriction(f[l-1],f[l-2]); zero_grid(u[l-2]); count += mgrid(u,f,w,l-2,nu1,nu2,nuc,cycle,smoother,mineigen); count += prolong(u[l-1],u[l-2]); } } count += prolong(w[l],u[l-1]); count += correct(u[l],w[l]); count += smooth(smoother,u[l],f[l],w[l],nu2,mineigen); } return count; } int main( void ) { int lmax,nc,nu1,nu2,nuc,iters,print_res; double tstart, ttotal, tmin; double mineigen; CYCLE cycle; METHOD smoother; GRIDLIST u,f,w,uex; get_options(&iters,&nc,&mineigen,&lmax,&nu1,&nu2,&nuc,&cycle,&smoother,&print_res); print_header(mineigen,lmax,nc,nu1,nu2,nuc,cycle,smoother); create_grid_list(&uex,nc,lmax); create_grid_list(&u,nc,lmax); create_grid_list(&f,nc,lmax); create_grid_list(&w,nc,lmax); long long int count = 0; tmin = 1000.0; int s, i; for (s=0; s<1; s++){ init_grid(u[lmax], 1.0); init_border(u[lmax]); init_border(w[lmax]); init_rhs(f[lmax]); init_solution(uex[lmax]); print_errors(u[lmax],f[lmax],uex[lmax],0); 
    tstart = get_time();
    i=0;
    while (i < iters && error(u[lmax],uex[lmax]) > 1e-12) {
      count += mgrid(u,f,w,lmax,nu1,nu2,nuc,cycle,smoother,mineigen);
      i++;
      if (print_res) print_errors(u[lmax],f[lmax],uex[lmax],i);
    }
    ttotal = (get_time()-tstart);
    if (ttotal < tmin) tmin = ttotal;
  }

  if (!print_res) printf("\n# total computation time: \n%7f\t%i #sec.\n\n", ttotal, i);

  free_grid_list(&uex,lmax);
  free_grid_list(&u,lmax);
  free_grid_list(&f,lmax);
  free_grid_list(&w,lmax);

  printf("%lld\n", count);
}
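The recursion in mgrid() above visits coarser levels once per call for a V cycle and twice for a W cycle. A small standalone counter makes the resulting work pattern concrete (hypothetical level count, no grid data; the S branch is omitted for brevity):

#include <cstdio>

// Count how often each level is visited, mirroring the V/W branches of
// mgrid() above.
void visit(int l, bool w_cycle, int* visits)
{
    visits[l]++;
    if (l == 0) return;                 // coarsest level: just relax
    visit(l - 1, w_cycle, visits);      // first coarse-grid correction
    if (w_cycle)
        visit(l - 1, w_cycle, visits);  // W cycle recurses a second time
}

int main()
{
    const int lmax = 4;                 // hypothetical number of levels
    int v[lmax + 1] = {0}, w[lmax + 1] = {0};
    visit(lmax, false, v);
    visit(lmax, true,  w);
    for (int l = lmax; l >= 0; --l)
        printf("level %d: V cycle %d visit(s), W cycle %d visit(s)\n",
               l, v[l], w[l]);
    // A W cycle visits level lmax-k 2^k times; a V cycle visits each level once.
    return 0;
}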
GB_unaryop__minv_int8_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_bool // op(A') function: GB_tran__minv_int8_bool // C type: int8_t // A type: bool // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ bool #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_bool ( int8_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
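The macros above pin the whole kernel down to one pattern: load aij, cast it to the output type, then apply the unary operator. A generic restatement of that pattern, with a hypothetical stand-in op in place of GB_IMINV_SIGNED (whose exact integer-inverse semantics are defined elsewhere in SuiteSparse):

#include <cstdint>
#include <cstddef>

// Sketch of Cx = op(cast(Ax)) as hard-coded by the GB_CAST_OP macro above:
// one cast and one op per entry, trivially parallel over p.
template <typename AType, typename CType, typename Op>
void unop_apply_sketch(CType* Cx, const AType* Ax, size_t anz, Op op)
{
    #pragma omp parallel for schedule(static)
    for (long long p = 0; p < (long long)anz; p++)
    {
        CType x = (CType)Ax[p]; // GB_CASTING
        Cx[p] = op(x);          // GB_OP
    }
}

// Usage with bool -> int8_t, as in this generated file; the lambda is only a
// stand-in, not the real GB_IMINV_SIGNED definition.
void example(int8_t* Cx, const bool* Ax, size_t anz)
{
    unop_apply_sketch(Cx, Ax, anz, [](int8_t x) -> int8_t {
        return x == 0 ? 0 : (int8_t)(1 / x); // hypothetical "inverse"
    });
}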
viterbi_decode_op.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <memory> #include <string> #include <vector> #include "paddle/fluid/operators/controlflow/compare_op.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/gather.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/operators/unique_op.h" #ifdef PADDLE_WITH_MKLML #include <omp.h> #endif namespace paddle { namespace operators { using LoDTensor = framework::LoDTensor; template <typename DeviceContext, typename T, typename IndType> struct Argmax { void operator()(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* out_idx, Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } int64_t height = pre * post; int64_t width = n; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); // Reduce #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int64_t i = 0; i < height; ++i) { int64_t h = i / post; int64_t w = i % post; IndType max_idx = -1; T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile for (int64_t j = 0; j < width; ++j) { if (in_data[h * width * post + j * post + w] > max_value) { max_value = in_data[h * width * post + j * post + w]; max_idx = j; } } out_data[i] = max_value; out_idx_data[i] = max_idx; } } }; template <typename DeviceContext> struct ARange { void operator()(const DeviceContext& dev_ctx, int64_t* data, int end, int64_t scale) { for (int i = 0; i < end; ++i) { data[i] = i * scale; } } }; template <typename DeviceContext, typename T> struct GetMaxValue { void operator()(const DeviceContext& dev_ctx, const Tensor& input, T* max_value) { auto input_ptr = input.data<T>(); auto num = input.numel(); *max_value = *std::max_element(input_ptr, input_ptr + num); } }; template <typename DeviceContext, typename T, typename IndexT = int> struct Gather { void operator()(const DeviceContext& ctx, const Tensor& src, const Tensor& index, Tensor* output) { CPUGather<T, IndexT>(ctx, src, index, output); } }; template <typename T, typename Functor, typename OutT = T> void SameDimsBinaryOP(const Tensor& lhs, const Tensor& rhs, Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); OutT* out_ptr = out->data<OutT>(); Functor functor; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]); } } template <typename DeviceContext, template <typename InT, typename OutT> typename CompareFunctor, typename T> struct GetMask { void operator()(const 
framework::ExecutionContext& ctx, const Tensor& lhs, const Tensor& rhs, Tensor* mask) { SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask); } }; template <bool is_multi_threads> struct GetInputIndex { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); for (int j = 0; j < out_dims_size; ++j) { int curr_idx = output_idx / output_strides[j]; output_idx %= output_strides[j]; *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0; *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0; } } }; template <> struct GetInputIndex<false> { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); *lhs_idx = pten::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array); *rhs_idx = pten::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array); pten::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size, index_array); } }; template <typename T, typename Functor, bool is_multi_threads = false> void SimpleBroadcastBinaryOP(const Tensor& lhs, const Tensor& rhs, Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); T* out_ptr = out->data<T>(); int out_size = static_cast<int>(out->dims().size()); std::vector<int> out_dims(out_size); std::vector<int> lhs_dims(out_size); std::vector<int> rhs_dims(out_size); std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data()); std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data()); std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data()); std::vector<int> output_strides(out_size, 1); std::vector<int> lhs_strides(out_size, 1); std::vector<int> rhs_strides(out_size, 1); std::vector<int> index_array(out_size, 0); // calculate strides for (int i = out_size - 2; i >= 0; --i) { output_strides[i] = output_strides[i + 1] * out_dims[i + 1]; lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1]; rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1]; } Functor functor; GetInputIndex<is_multi_threads> get_input_index; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { int lhs_idx = 0; int rhs_idx = 0; get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides, output_strides, i, index_array.data(), &lhs_idx, &rhs_idx); out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]); } } template <typename DeviceContext, template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation { void operator()(const DeviceContext& dev_ctx, const Tensor& lhs, const Tensor& rhs, Tensor* output) { if (lhs.dims() == rhs.dims()) { SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output); } else { bool is_multi_threads = false; #ifdef PADDLE_WITH_MKLML if (omp_get_max_threads() > 1) { is_multi_threads = true; } #endif if (is_multi_threads) { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output); } else { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output); } } } }; class TensorBuffer { public: 
explicit TensorBuffer(const LoDTensor& in) : buffer_(in), offset_(0) { buffer_.Resize({buffer_.numel()}); } Tensor GetBufferBlock(std::initializer_list<int64_t> shape) { int64_t size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>()); Tensor block = buffer_.Slice(offset_, offset_ + size); offset_ += size; block.Resize(shape); return block; } private: LoDTensor buffer_; // need to resize 1-D Tensor int offset_; }; template <typename DeviceContext, typename T> class ViterbiDecodeKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto curr_place = ctx.GetPlace(); auto* input = ctx.Input<Tensor>("Input"); auto batch_size = static_cast<int>(input->dims()[0]); auto seq_len = static_cast<int>(input->dims()[1]); auto n_labels = static_cast<int>(input->dims()[2]); pten::funcs::SetConstant<DeviceContext, T> float_functor; pten::funcs::SetConstant<DeviceContext, int64_t> int_functor; std::vector<Tensor> historys; // We create tensor buffer in order to avoid allocating memory frequently // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero... int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size; LoDTensor int_buffer; int_buffer.Resize(framework::make_ddim({buffer_size})); int_buffer.mutable_data<int64_t>(ctx.GetPlace()); TensorBuffer int_tensor_buffer(int_buffer); // create float tensor buffer // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max buffer_size = batch_size * (seq_len + 10) * n_labels + (batch_size + 2) * n_labels * n_labels; LoDTensor float_buffer; float_buffer.Resize(framework::make_ddim({buffer_size})); float_buffer.mutable_data<T>(ctx.GetPlace()); TensorBuffer float_tensor_buffer(float_buffer); auto* length = ctx.Input<Tensor>("Length"); Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1}); framework::TensorCopy(*length, curr_place, dev_ctx, &left_length); int64_t max_seq_len = 0; GetMaxValue<DeviceContext, int64_t> get_max_value; get_max_value(dev_ctx, left_length, &max_seq_len); auto* scores = ctx.Output<Tensor>("Scores"); scores->mutable_data<T>(curr_place); auto* path = ctx.Output<Tensor>("Path"); path->Resize({batch_size, max_seq_len}); path->mutable_data<int64_t>(curr_place); Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size}); auto batch_path = Unbind(tpath); for (auto it = batch_path.begin(); it != batch_path.end(); ++it) { it->Resize({batch_size}); } // create and init required tensor Tensor input_exp = float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2}); auto* transition = ctx.Input<Tensor>("Transition"); Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels}); framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp); trans_exp.Resize({1, n_labels, n_labels}); Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &zero, 0); Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &one, 1); Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1}); float_functor(dev_ctx, &float_one, static_cast<T>(1.0)); Tensor alpha_trn_sum = float_tensor_buffer.GetBufferBlock({batch_size, n_labels, 
n_labels}); Tensor alpha_max = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); Tensor alpha_argmax = int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); auto alpha_argmax_unbind = Unbind(alpha_argmax); Tensor alpha_nxt = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size}); Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size}); Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1}); Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); Tensor rest_trans = float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels}); Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size}); Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size}); Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size}); Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size}); std::vector<const Tensor*> shape{&rest_trans, &stop_trans, &start_trans}; std::vector<Tensor*> outputs{&rest_trans, &stop_trans, &start_trans}; math::SplitFunctor<DeviceContext, T> split_functor; split_functor(dev_ctx, trans_exp, shape, 1, &outputs); stop_trans.Resize({1, n_labels}); start_trans.Resize({1, n_labels}); auto logit0 = input_exp.Slice(0, 1); logit0.Resize({batch_size, n_labels}); BinaryOperation<DeviceContext, AddFunctor, T> AddFloat; BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt; BinaryOperation<DeviceContext, MulFunctor, T> MulFloat; BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt; BinaryOperation<DeviceContext, SubFunctor, T> SubFloat; BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt; if (include_bos_eos_tag) { AddFloat(dev_ctx, logit0, start_trans, &alpha); GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } else { alpha = logit0; } SubInt(dev_ctx, left_length, one, &left_length); Argmax<DeviceContext, T, int64_t> argmax; for (int64_t i = 1; i < max_seq_len; ++i) { Tensor logit = input_exp.Slice(i, i + 1); logit.Resize({batch_size, n_labels}); Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1}); AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum); auto alpha_argmax_temp = alpha_argmax_unbind[i - 1]; alpha_argmax_temp.Resize({batch_size, n_labels}); argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1); historys.emplace_back(alpha_argmax_temp); AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt); alpha.Resize({batch_size, n_labels}); // mask = paddle.cast((left_length > 0), dtype='float32') // alpha = mask * alpha_nxt + (1 - mask) * alpha GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero, &float_mask); // alpha_nxt = mask * alpha_nxt MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt); // inv_mask = 1 - mask SubFloat(dev_ctx, float_one, float_mask, &float_mask); // alpha = (1 - mask) * alpha MulFloat(dev_ctx, alpha, float_mask, &alpha); // alpha += alpha_nxt AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); if (include_bos_eos_tag) { GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); // alpha += mask * trans_exp[:, self.stop_idx] MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } SubInt(dev_ctx, left_length, one, &left_length); } argmax(ctx, alpha, &last_ids, scores, 1); 
left_length.Resize({batch_size}); GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length, zero, &int_mask); // last_ids_update = last_ids * tag_mask int last_ids_index = 1; int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len)); MulInt(dev_ctx, last_ids, int_mask, &batch_path[actual_len - last_ids_index]); // The algorithm below can refer to // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438 ARange<DeviceContext> arange; arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels); Gather<DeviceContext, int64_t, int64_t> gather; for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) { ++last_ids_index; AddInt(dev_ctx, left_length, one, &left_length); AddInt(dev_ctx, batch_offset, last_ids, &gather_idx); Tensor& last_ids_update = batch_path[actual_len - last_ids_index]; hist->Resize({batch_size * n_labels}); gather(dev_ctx, *hist, gather_idx, &last_ids_update); GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update); GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero, &zero_len_mask); MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp); SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask); MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update); AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update); GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids, int_mask, &last_ids); AddInt(dev_ctx, last_ids_update, last_ids, &last_ids); } TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0}); } }; } // namespace operators } // namespace paddle
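The kernel above vectorizes the classic Viterbi recurrence over a batch, with masking for variable sequence lengths. Stripped of tensors and masks, the per-sequence recurrence it implements looks like this (hypothetical toy sizes and scores):

#include <cstdio>
#include <vector>

int main()
{
    // Toy problem: 2 tags, 3 time steps (hypothetical scores).
    const int T = 3, L = 2;
    float emit[T][L]  = {{1.0f, 0.2f}, {0.1f, 1.5f}, {0.8f, 0.3f}};
    float trans[L][L] = {{0.5f, 0.1f}, {0.2f, 0.6f}};   // trans[prev][next]

    std::vector<std::vector<int>> history;              // mirrors "historys"
    float alpha[L];
    for (int l = 0; l < L; ++l) alpha[l] = emit[0][l];  // alpha = logit0

    for (int t = 1; t < T; ++t) {                       // forward pass
        float next[L]; std::vector<int> argmax(L);
        for (int l = 0; l < L; ++l) {
            float best = alpha[0] + trans[0][l]; int arg = 0;
            for (int p = 1; p < L; ++p)                 // alpha_trn_sum + argmax
                if (alpha[p] + trans[p][l] > best) { best = alpha[p] + trans[p][l]; arg = p; }
            next[l] = best + emit[t][l];                // alpha_max + logit
            argmax[l] = arg;
        }
        history.push_back(argmax);
        for (int l = 0; l < L; ++l) alpha[l] = next[l];
    }

    int last = (alpha[0] >= alpha[1]) ? 0 : 1;          // final argmax
    std::vector<int> path = {last};
    for (auto it = history.rbegin(); it != history.rend(); ++it)
        path.insert(path.begin(), (*it)[path.front()]); // backtrack, as the
                                                        // gather loop above does
    printf("best path:");
    for (int tag : path) printf(" %d", tag);
    printf("  score: %.2f\n", alpha[last]);
    return 0;
}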
sapG_fmt_plug.c
/* * this is a SAP PASSCODE (CODEVN G) plugin for john the ripper. * tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-) * * please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it... * * (c) x7d8 sap loverz, public domain, btw * cheers: see test-cases. * * Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and * encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released * to the general public under the following terms: Redistribution and use in * source and binary forms, with or without modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sapG; #elif FMT_REGISTERS_H john_register_one(&fmt_sapG); #else #include <string.h> #include <ctype.h> #include "arch.h" #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1) #endif #include "simd-intrinsics.h" #include "misc.h" #include "common.h" #include "formats.h" #include "sha.h" #include "options.h" #include "unicode.h" #include "johnswap.h" #define FORMAT_LABEL "sapg" #define FORMAT_NAME "SAP CODVN F/G (PASSCODE)" #define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME static unsigned int omp_t = 1; #if defined(_OPENMP) #include <omp.h> #ifndef OMP_SCALE #if defined (SIMD_COEF_32) // some OMP scaling moved into max_keys, so that we can have more values in SIMD // mode, to sort hashes by limb size. (TODO) #define OMP_SCALE 128 #else #define OMP_SCALE 2048 #endif #endif #endif #include "memdbg.h" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define SALT_FIELD_LENGTH 40 #define USER_NAME_LENGTH 12 /* max. length of user name in characters */ #define SALT_LENGTH (USER_NAME_LENGTH * 4) /* bytes of UTF-8 */ #define PLAINTEXT_LENGTH 40 /* Characters */ #define UTF8_PLAINTEXT_LENGTH MIN(125, PLAINTEXT_LENGTH * 3) /* bytes */ #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct saltstruct) #define SALT_ALIGN 4 #define CIPHERTEXT_LENGTH (SALT_LENGTH + 1 + 2*BINARY_SIZE) /* SALT + $ + 2x20 bytes for SHA1-representation */ #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT NBKEYS // max keys increased to allow sorting based on limb counts #define MAX_KEYS_PER_CRYPT NBKEYS*64 #define GETWORDPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) #define GETSTARTPOS(index) ( (index&(SIMD_COEF_32-1))*4 + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) #define GETOUTSTARTPOS(index) ( (index&(SIMD_COEF_32-1))*4 + (unsigned int)index/SIMD_COEF_32*20*SIMD_COEF_32 ) #if ARCH_LITTLE_ENDIAN #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) //for endianity conversion #else #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&60)*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 ) //for endianity conversion #endif #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif //this array is from disp+work (sap's worker process) #define MAGIC_ARRAY_SIZE 160 static const unsigned char theMagicArray[MAGIC_ARRAY_SIZE]= {0x91, 0xAC, 0x51, 0x14, 0x9F, 0x67, 0x54, 0x43, 0x24, 0xE7, 0x3B, 0xE0, 0x28, 0x74, 0x7B, 0xC2, 0x86, 0x33, 0x13, 0xEB, 0x5A, 0x4F, 0xCB, 0x5C, 0x08, 0x0A, 0x73, 0x37, 0x0E, 0x5D, 0x1C, 0x2F, 0x33, 0x8F, 0xE6, 0xE5, 0xF8, 0x9B, 0xAE, 0xDD, 0x16, 0xF2, 0x4B, 0x8D, 0x2C, 0xE1, 0xD4, 0xDC, 0xB0, 0xCB, 0xDF, 0x9D, 0xD4, 0x70, 0x6D, 0x17, 0xF9, 0x4D, 0x42, 0x3F, 0x9B, 0x1B, 0x11, 0x94, 
0x9F, 0x5B, 0xC1, 0x9B, 0x06, 0x05, 0x9D, 0x03, 0x9D, 0x5E, 0x13, 0x8A, 0x1E, 0x9A, 0x6A, 0xE8, 0xD9, 0x7C, 0x14, 0x17, 0x58, 0xC7, 0x2A, 0xF6, 0xA1, 0x99, 0x63, 0x0A, 0xD7, 0xFD, 0x70, 0xC3, 0xF6, 0x5E, 0x74, 0x13, 0x03, 0xC9, 0x0B, 0x04, 0x26, 0x98, 0xF7, 0x26, 0x8A, 0x92, 0x93, 0x25, 0xB0, 0xA2, 0x0D, 0x23, 0xED, 0x63, 0x79, 0x6D, 0x13, 0x32, 0xFA, 0x3C, 0x35, 0x02, 0x9A, 0xA3, 0xB3, 0xDD, 0x8E, 0x0A, 0x24, 0xBF, 0x51, 0xC3, 0x7C, 0xCD, 0x55, 0x9F, 0x37, 0xAF, 0x94, 0x4C, 0x29, 0x08, 0x52, 0x82, 0xB2, 0x3B, 0x4E, 0x37, 0x9F, 0x17, 0x07, 0x91, 0x11, 0x3B, 0xFD, 0xCD }; // For backwards compatibility, we must support salts padded with spaces to a field width of 40 static struct fmt_tests tests[] = { {"DDIC$6066CD3147915331EC4C602847D27A75EB3E8F0A", "DDIC"}, /* * invalid IRL because password is too short (would work during login, * but not during password change). We use these tests anyway because * they help verifying key buffer cleaning: */ {"F $646A0AD270DF651065669A45D171EDD62DFE39A1", "X"}, {"JOHNNY $7D79B478E70CAAE63C41E0824EAB644B9070D10A", "CYBERPUNK"}, {"VAN$D15597367F24090F0A501962788E9F19B3604E73", "hauser"}, {"ROOT$1194E38F14B9F3F8DA1B181F14DEB70E7BDCC239", "KID"}, // invalid, because password is too short (would work during login, but not during password change): {"MAN$22886450D0AB90FDA7F91C4F3DD5619175B372EA", "u"}, // SAP user name consisting of 12 consecutive EURO characters: {"\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac" "\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac\xe2\x82\xac" "$B20D15C088481780CD44FCF2003AAAFBD9710C7C", "--+----"}, {"SAP* $60A0F7E06D95BC9FB45F605BDF1F7B660E5D5D4E", "MaStEr"}, {"DOLLAR$$$---$E0180FD4542D8B6715E7D0D9EDE7E2D2E40C3D4D", "Dollar$$$---"}, {NULL} }; static UTF8 (*saved_plain)[UTF8_PLAINTEXT_LENGTH + 1]; static int *keyLen; static int max_keys; #ifdef SIMD_COEF_32 // max intermediate crypt size is 256 bytes // multiple key buffers for lengths > 55 #define LIMB 5 static unsigned char *saved_key[LIMB]; static unsigned char *crypt_key; static unsigned char *interm_crypt; static unsigned int *clean_pos; #else static UTF8 (*saved_key)[UTF8_PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)]; #endif static struct saltstruct { unsigned int l; unsigned char s[SALT_LENGTH]; } *cur_salt; static void init(struct fmt_main *self) { static int warned = 0; #ifdef SIMD_COEF_32 int i; #endif // This is needed in order NOT to upper-case german double-s // in UTF-8 mode. initUnicode(UNICODE_MS_NEW); if (!options.listconf && options.target_enc != UTF_8 && !(options.flags & FLG_TEST_CHK) && warned++ == 0) fprintf(stderr, "Warning: SAP-F/G format should always be UTF-8.\n" "Use --target-encoding=utf8\n"); // Max 40 characters or 125 bytes of UTF-8, We actually do not truncate // multibyte input at 40 characters later because it's too expensive. 
if (options.target_enc == UTF_8) self->params.plaintext_length = UTF8_PLAINTEXT_LENGTH; #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif max_keys = self->params.max_keys_per_crypt; saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain)); keyLen = mem_calloc(self->params.max_keys_per_crypt, sizeof(*keyLen)); #ifdef SIMD_COEF_32 clean_pos = mem_calloc(self->params.max_keys_per_crypt, sizeof(*clean_pos)); for (i = 0; i < LIMB; i++) saved_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, SHA_BUF_SIZ * 4, MEM_ALIGN_SIMD); interm_crypt = mem_calloc_align(self->params.max_keys_per_crypt, 20, MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(self->params.max_keys_per_crypt, 20, MEM_ALIGN_SIMD); #else crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); saved_key = saved_plain; #endif } static void done(void) { #ifdef SIMD_COEF_32 int i; #endif MEM_FREE(crypt_key); #ifdef SIMD_COEF_32 MEM_FREE(interm_crypt); for (i = 0; i < LIMB; i++) MEM_FREE(saved_key[i]); MEM_FREE(clean_pos); #endif MEM_FREE(keyLen); MEM_FREE(saved_plain); } static int valid(char *ciphertext, struct fmt_main *self) { int i, j; char *p; if (!ciphertext) return 0; p = strrchr(ciphertext, '$'); if (!p) return 0; if (p - ciphertext > SALT_FIELD_LENGTH) return 0; if (strlen(&p[1]) != BINARY_SIZE * 2) return 0; j = 0; for (i = 0; i < p - ciphertext; i++) { // even those lower case non-ascii characters with a // corresponding upper case character could be rejected if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z') return 0; else if (ciphertext[i] & 0x80) j++; // Reject if user name is longer than 12 characters. // This is not accurate, but close enough. // To be exact, I'd need to keep j unchanged for // the first byte of each character, instead of // incrementing j for every byte >= 0x80. if (i >= USER_NAME_LENGTH + j && ciphertext[i] != ' ') return 0; } // SAP user name cannot start with ! or ? if (ciphertext[0] == '!' 
|| ciphertext[0] == '?') return 0; // the user name must not simply be spaces, or empty for (i = 0; i < p - ciphertext; ++i) { if (ciphertext[i] == ' ') continue; break; } if (ciphertext[i] == '$') return 0; p++; // SAP and sap2john.pl always use upper case A-F for hashes, // so don't allow a-f for (i = 0; i < BINARY_SIZE * 2; i++) if (!(((p[i]>='0' && p[i]<='9')) || ((p[i]>='A' && p[i]<='F')) )) return 0; return 1; } static void set_salt(void *salt) { cur_salt = salt; } static void *get_salt(char *ciphertext) { char *p; static struct saltstruct out; p = strrchr(ciphertext, '$'); out.l = (int)(p - ciphertext); memset(out.s, 0, sizeof(out.s)); memcpy(out.s, ciphertext, out.l); return &out; } static void clear_keys(void) { memset(keyLen, 0, sizeof(*keyLen) * omp_t * MAX_KEYS_PER_CRYPT); } static void set_key(char *key, int index) { strnzcpy((char*)saved_plain[index], key, sizeof(*saved_plain)); keyLen[index] = -1; } static char *get_key(int index) { return (char*)saved_plain[index]; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x,y=0; for (;y<max_keys;y+=SIMD_COEF_32) for (x=0;x<SIMD_COEF_32;x++) { if ( ((unsigned int*)binary)[0] == ((unsigned int*)crypt_key)[x+y*5] ) return 1; } return 0; #else unsigned int index; for (index = 0; index < count; index++) if (!memcmp(binary, crypt_key[index], BINARY_SIZE)) return 1; return 0; #endif } static int cmp_exact(char *source, int index) { return 1; } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x,y; x = index&(SIMD_COEF_32-1); y = (unsigned int)index/SIMD_COEF_32; if ( (((unsigned int*)binary)[0] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5]) | (((unsigned int*)binary)[1] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+SIMD_COEF_32]) | (((unsigned int*)binary)[2] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+2*SIMD_COEF_32]) | (((unsigned int*)binary)[3] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+3*SIMD_COEF_32])| (((unsigned int*)binary)[4] != ((unsigned int*)crypt_key)[x+y*SIMD_COEF_32*5+4*SIMD_COEF_32]) ) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } /* * calculate the length of data that has to be hashed from the magic array. pass the first hash result in here. * this is part of the walld0rf-magic * The return value will always be between 32 and 82, inclusive */ #if SIMD_COEF_32 inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray, unsigned int index) #else inline static unsigned int extractLengthOfMagicArray(unsigned const char *pbHashArray) #endif { unsigned int modSum = 0; #if SIMD_COEF_32 unsigned const char *p = &pbHashArray[GETOUTSTARTPOS(index)]; // [(index/SIMD_COEF_32)*20*SIMD_COEF_32+(index%SIMD_COEF_32)*4] modSum += *p++ % 6; modSum += *p++ % 6; modSum += *p++ % 6; modSum += *p++ % 6; p += 4*(SIMD_COEF_32 - 1); modSum += *p++ % 6; modSum += *p++ % 6; modSum += *p++ % 6; modSum += *p++ % 6; p += 4*(SIMD_COEF_32 - 1); #if ARCH_LITTLE_ENDIAN p += 2; #endif modSum += *p++ % 6; modSum += *p % 6; #else unsigned int i; for (i = 0; i < 10; i++) modSum += pbHashArray[i] % 6; #endif return modSum + 0x20; //0x20 is hardcoded... } /* * Calculate the offset into the magic array. 
pass the first hash result in here * part of the walld0rf-magic * The return value will always be between 0 and 70, inclusive */ #if SIMD_COEF_32 inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray, unsigned int index) #else inline static unsigned int extractOffsetToMagicArray(unsigned const char *pbHashArray) #endif { unsigned int modSum = 0; #if SIMD_COEF_32 unsigned const char *p = &pbHashArray[GETOUTSTARTPOS(index)]; // [(index/SIMD_COEF_32)*20*SIMD_COEF_32+(index%SIMD_COEF_32)*4] p += 4*(SIMD_COEF_32)*2; #if !ARCH_LITTLE_ENDIAN p += 2; #endif modSum += *p++ % 8; modSum += *p++ % 8; #if ARCH_LITTLE_ENDIAN p += 2; #endif p += 4*(SIMD_COEF_32 - 1); modSum += *p++ % 8; modSum += *p++ % 8; modSum += *p++ % 8; modSum += *p++ % 8; p += 4*(SIMD_COEF_32 - 1); modSum += *p++ % 8; modSum += *p++ % 8; modSum += *p++ % 8; modSum += *p % 8; #else unsigned int i; for (i = 10; i < 20; i++) modSum += pbHashArray[i] % 8; #endif return modSum; } #if SIMD_COEF_32 inline static void crypt_done(unsigned const int *source, unsigned int *dest, int index) { unsigned int i; unsigned const int *s = &source[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32]; unsigned int *d = &dest[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32]; for (i = 0; i < 5; i++) { *d = *s; s += SIMD_COEF_32; d += SIMD_COEF_32; } } #endif static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; #if SIMD_COEF_32 #define ti (t*NBKEYS+index) unsigned t; #if defined(_OPENMP) #pragma omp parallel for #endif for (t = 0; t < (count-1)/(NBKEYS)+1; t++) { unsigned int index, i, longest; int len; unsigned int crypt_len[NBKEYS]; longest = 0; for (index = 0; index < NBKEYS; index++) { // Store key into vector key buffer if ((len = keyLen[ti]) < 0) { uint32_t *keybuf_word = (uint32_t*)&saved_key[0][GETSTARTPOS(ti)]; #if ARCH_ALLOWS_UNALIGNED const uint32_t *wkey = (uint32_t*)saved_plain[ti]; #else char buf_aligned[UTF8_PLAINTEXT_LENGTH + 1] JTR_ALIGN(4); char *key = (char*)saved_plain[ti]; const uint32_t *wkey = is_aligned(key, 4) ? (uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key); #endif uint32_t temp; len = 0; #if ARCH_LITTLE_ENDIAN while(((unsigned char)(temp = *wkey++))) { if (!(temp & 0xff00)) { *keybuf_word = JOHNSWAP(temp & 0xff); len++; break; } if (!(temp & 0xff0000)) { *keybuf_word = JOHNSWAP(temp & 0xffff); len+=2; break; } *keybuf_word = JOHNSWAP(temp); if (!(temp & 0xff000000)) { len+=3; break; } #else while((temp = *wkey++) & 0xff000000) { if (!(temp & 0xff0000)) { *keybuf_word = (temp & 0xff000000) | (0x80 << 16); len++; break; } if (!(temp & 0xff00)) { *keybuf_word = (temp & 0xffff0000) | (0x80 << 8); len+=2; break; } *keybuf_word = temp; if (!(temp & 0xff)) { *keybuf_word = temp | 0x80U; len+=3; break; } #endif len += 4; if (len & 63) keybuf_word += SIMD_COEF_32; else keybuf_word = (uint32_t*)&saved_key[len>>6][GETSTARTPOS(ti)]; } // Back-out of trailing spaces while(len && saved_plain[ti][len - 1] == ' ') saved_plain[ti][--len] = 0; keyLen[ti] = len; } // 1. 
we need to SHA1 the password and username for (i = 0; i < cur_salt->l; i++) saved_key[(len+i)>>6][GETPOS((len + i), ti)] = cur_salt->s[i]; len += i; saved_key[len>>6][GETPOS(len, ti)] = 0x80; // Clean rest of this buffer i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; for (; i < (((len+8)>>6)+1)*64; i += 4) *(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0; // This should do good but Valgrind insists it's a waste //if (clean_pos[ti] < i) // clean_pos[ti] = len + 1; if (len > longest) longest = len; ((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3; crypt_len[index] = len; } SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&crypt_key[t*20*NBKEYS], NULL, SSEi_MIXED_IN); // Do another and possibly a third limb memcpy(&interm_crypt[t*20*NBKEYS], &crypt_key[t*20*NBKEYS], 20*NBKEYS); for (i = 1; i < (((longest + 8) >> 6) + 1); i++) { SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD); // Copy any output that is done now for (index = 0; index < NBKEYS; index++) if (((crypt_len[index] + 8) >> 6) == i) crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti); } longest = 0; for (index = 0; index < NBKEYS; index++) { unsigned int offsetMagicArray; unsigned int lengthIntoMagicArray; const unsigned char *p; int i; // If final crypt ends up to be 56-61 bytes (or so), this must be clean for (i = 0; i < LIMB; i++) if (keyLen[ti] < i * 64 + 55) ((unsigned int*)saved_key[i])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = 0; len = keyLen[ti]; lengthIntoMagicArray = extractLengthOfMagicArray(crypt_key, ti); offsetMagicArray = extractOffsetToMagicArray(crypt_key, ti); // 2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode... i = len - 1; p = &theMagicArray[offsetMagicArray]; // Copy a char at a time until aligned (at destination)... while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = *p++; // ...then a word at a time. This is a good boost, we are copying between 32 and 82 bytes here. #if ARCH_ALLOWS_UNALIGNED for (;i < lengthIntoMagicArray + len; i += 4, p += 4) #if ARCH_LITTLE_ENDIAN *(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = JOHNSWAP(*(uint32_t*)p); #else *(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = *(uint32_t*)p; #endif #else for (;i < lengthIntoMagicArray + len; ++i, ++p) { saved_key[i>>6][GETPOS(i, ti)] = *p; } #endif // Now, the salt. This is typically too short for the stunt above. 
for (i = 0; i < cur_salt->l; i++) saved_key[(len+lengthIntoMagicArray+i)>>6][GETPOS((len + lengthIntoMagicArray + i), ti)] = cur_salt->s[i]; len += lengthIntoMagicArray + cur_salt->l; saved_key[len>>6][GETPOS(len, ti)] = 0x80; crypt_len[index] = len; // Clean the rest of this buffer as needed i = len; while (++i & 3) saved_key[i>>6][GETPOS(i, ti)] = 0; for (; i < clean_pos[ti]; i += 4) *(uint32_t*)&saved_key[i>>6][GETWORDPOS(i, ti)] = 0; clean_pos[ti] = len + 1; if (len > longest) longest = len; ((unsigned int*)saved_key[(len+8)>>6])[15*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + ti/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = len << 3; } SIMDSHA1body(&saved_key[0][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], NULL, SSEi_MIXED_IN); // Typically, no or very few crypts are done at this point so this is faster than to memcpy the lot for (index = 0; index < NBKEYS; index++) if (crypt_len[index] < 56) crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti); // Do another and possibly a third, fourth and fifth limb for (i = 1; i < (((longest + 8) >> 6) + 1); i++) { SIMDSHA1body(&saved_key[i][t*SHA_BUF_SIZ*4*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], (unsigned int*)&interm_crypt[t*20*NBKEYS], SSEi_MIXED_IN|SSEi_RELOAD); // Copy any output that is done now for (index = 0; index < NBKEYS; index++) if (((crypt_len[index] + 8) >> 6) == i) crypt_done((unsigned int*)interm_crypt, (unsigned int*)crypt_key, ti); } } #undef t #undef ti #else #ifdef _OPENMP int index; #pragma omp parallel for for (index = 0; index < count; index++) #else #define index 0 #endif { unsigned int offsetMagicArray, lengthIntoMagicArray; unsigned char temp_key[BINARY_SIZE]; unsigned char tempVar[UTF8_PLAINTEXT_LENGTH + MAGIC_ARRAY_SIZE + SALT_LENGTH]; //max size... SHA_CTX ctx; if (keyLen[index] < 0) { keyLen[index] = strlen((char*)saved_key[index]); // Back-out of trailing spaces while (saved_key[index][keyLen[index] - 1] == ' ') { saved_key[index][--keyLen[index]] = 0; if (keyLen[index] == 0) break; } } //1. we need to SHA1 the password and username memcpy(tempVar, saved_key[index], keyLen[index]); //first: the password memcpy(tempVar + keyLen[index], cur_salt->s, cur_salt->l); //second: the salt(username) SHA1_Init(&ctx); SHA1_Update(&ctx, tempVar, keyLen[index] + cur_salt->l); SHA1_Final((unsigned char*)temp_key, &ctx); lengthIntoMagicArray = extractLengthOfMagicArray(temp_key); offsetMagicArray = extractOffsetToMagicArray(temp_key); //2. now, hash again --> sha1($password+$partOfMagicArray+$username) --> this is CODVNG passcode... 
memcpy(tempVar + keyLen[index], &theMagicArray[offsetMagicArray], lengthIntoMagicArray); memcpy(tempVar + keyLen[index] + lengthIntoMagicArray, cur_salt->s, cur_salt->l); SHA1_Init(&ctx); SHA1_Update(&ctx, tempVar, keyLen[index] + lengthIntoMagicArray + cur_salt->l); SHA1_Final((unsigned char*)crypt_key[index], &ctx); } #undef index #endif return count; } static void *get_binary(char *ciphertext) { static int outbuf[BINARY_SIZE / sizeof(int)]; char *realcipher = (char*)outbuf; int i; char* newCiphertextPointer; newCiphertextPointer = strrchr(ciphertext, '$') + 1; for (i=0;i<BINARY_SIZE;i++) { realcipher[i] = atoi16[ARCH_INDEX(newCiphertextPointer[i*2])]*16 + atoi16[ARCH_INDEX(newCiphertextPointer[i*2+1])]; } #if defined(SIMD_COEF_32) && ARCH_LITTLE_ENDIAN alter_endianity((unsigned char*)realcipher, BINARY_SIZE); #endif return (void*)realcipher; } #if 0 // Not possible with current interface static char *source(struct db_password *pw, char Buf[LINE_BUFFER_SIZE] ) { struct saltstruct *salt_s = (struct saltstruct*)(pw->source); unsigned char realcipher[BINARY_SIZE]; unsigned char *cpi; char *cpo; int i; memcpy(realcipher, pw->binary, BINARY_SIZE); #ifdef SIMD_COEF_32 alter_endianity(realcipher, BINARY_SIZE); #endif memcpy(Buf, salt_s->s, salt_s->l); cpo = &Buf[salt_s->l]; *cpo++ = '$'; cpi = realcipher; for (i = 0; i < BINARY_SIZE; ++i) { *cpo++ = itoa16u[(*cpi)>>4]; *cpo++ = itoa16u[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } #endif #define COMMON_GET_HASH_SIMD32 5 #define COMMON_GET_HASH_VAR crypt_key #include "common-get-hash.h" // Here, we remove any salt padding and trim it to 44 bytes static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; char *p; int i; p = strrchr(ciphertext, '$'); i = (int)(p - ciphertext) - 1; while (ciphertext[i] == ' ' || i >= SALT_LENGTH) i--; i++; memset(out, 0, sizeof(out)); memcpy(out, ciphertext, i); strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i); return out; } // Public domain hash function by DJ Bernstein static int salt_hash(void *salt) { struct saltstruct *s = (struct saltstruct*)salt; unsigned int hash = 5381; unsigned int i; for (i = 0; i < s->l; i++) hash = ((hash << 5) + hash) ^ s->s[i]; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_sapG = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #if !defined(SIMD_COEF_32) || defined(SIMD_PARA_SHA1) FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_UTF8, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
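/*
 * For reference only - a minimal scalar sketch of the CODVN F/G "walld0rf-magic"
 * derivation that crypt_all() above implements (cf. extractLengthOfMagicArray /
 * extractOffsetToMagicArray and the non-SIMD path). The function name and
 * parameters are hypothetical; it assumes the OpenSSL-style SHA1_* calls this
 * file already uses. Kept out of the build with #if 0.
 */
#if 0
static void sapg_passcode_sketch(const unsigned char *password, unsigned int pw_len,
                                 const unsigned char *salt, unsigned int salt_len,
                                 unsigned char out[BINARY_SIZE])
{
	unsigned char digest[BINARY_SIZE];
	unsigned int i, offset = 0, length = 0;
	SHA_CTX ctx;

	/* 1. digest = SHA-1(password . salt) */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, password, pw_len);
	SHA1_Update(&ctx, salt, salt_len);
	SHA1_Final(digest, &ctx);

	/* 2. derive the slice of theMagicArray to mix in:
	      length falls in 32..82, offset in 0..70 (matching the helpers above) */
	for (i = 0; i < 10; i++)
		length += digest[i] % 6;
	length += 0x20;
	for (i = 10; i < 20; i++)
		offset += digest[i] % 8;

	/* 3. passcode = SHA-1(password . theMagicArray[offset..offset+length-1] . salt) */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, password, pw_len);
	SHA1_Update(&ctx, &theMagicArray[offset], length);
	SHA1_Update(&ctx, salt, salt_len);
	SHA1_Final(out, &ctx);
}
#endif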
wpqb.c
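/*
 * wpqb.c: ordering of weighted (b,p,q) index entries and greedy selection of
 * mutually non-conflicting pairs (no index appearing twice), with a serial
 * (qsort) and an OpenMP (odd-even transposition sort plus critical-section
 * merge) variant.
 */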
#include "wpqb.h" static_assert(sizeof(wpqb) == sizeof(long double), "sizeof(wpqb) != sizeof(long double)"); int wpqb_cmp(const wpqb a[static 1], const wpqb b[static 1]) { if (a == b) return 0; if (a->w < b->w) return 1; if (a->w > b->w) return -1; if (a->w == b->w) { if (a->i.b < b->i.b) return 2; if (a->i.b > b->i.b) return -2; if (a->i.p < b->i.p) return 3; if (a->i.p > b->i.p) return -3; if (a->i.q < b->i.q) return 4; if (a->i.q > b->i.q) return -4; return 0; } if (b->w == b->w) return 5; if (a->w == a->w) return -5; // both weights are NaN if (a->i.b < b->i.b) return 2; if (a->i.b > b->i.b) return -2; if (a->i.p < b->i.p) return 3; if (a->i.p > b->i.p) return -3; if (a->i.q < b->i.q) return 4; if (a->i.q > b->i.q) return -4; return 0; } #ifdef _OPENMP static void wpqb_sort1(const uint32_t n_a, wpqb a[static 1]) { static uint32_t oe, na, s; if (!n_a) return; oe = 0u; na = (n_a - 1u); s = 0u; for (uint32_t swps = ~0u; swps; (oe ^= 1u), (swps = s), (s = 0u)) { // odd-even sort #pragma omp parallel for default(none) shared(oe,na,a) reduction(+:s) for (uint32_t i = oe; i < na; i += 2u) { const uint32_t j = (i + 1u); if (wpqb_cmp((a + i), (a + j)) > 0) { const wpqb t = a[i]; a[i] = a[j]; a[j] = t; ++s; } } } } #else /* !_OPENMP */ static void wpqb_sort0(const uint32_t n_a, wpqb a[static 1]) { if (n_a) qsort(a, n_a, sizeof(wpqb), (int (*)(const void*, const void*))wpqb_cmp); } #endif /* ?_OPENMP */ static void wpqb_ncp0(const uint16_t n, const uint32_t n_a, wpqb a[static 1], const uint16_t n_s, uint32_t s[static 1], wpqb_info w[static 1]) { w->w = qNaN(); w->i.s = UINT16_C(0); w->i.f = 0u; if (!n) return; if (!n_s) return; if (!n_a) return; bool r[n]; for (uint16_t i = UINT16_C(0); i < n; ++i) r[i] = false; uint16_t ns = (uint16_t)(n >> 1u); ns = (uint16_t)(((uint32_t)ns <= n_a) ? ns : (uint16_t)n_a); ns = (uint16_t)((ns <= n_s) ? 
ns : n_s); for (uint32_t j = 0u, k; w->i.s < ns; j = k) { s[w->i.s] = j; r[a[j].i.p] = true; r[a[j].i.q] = true; for (k = j + 1u; k < n_a; ++k) if (!(r[a[k].i.p] || r[a[k].i.q])) break; (w->i.s)++; if (k >= n_a) break; } w->w = 0.0L; for (uint16_t i = w->i.s; i; ) w->w += a[s[--i]].w; } #ifdef _OPENMP static void wpqb_update(uint32_t s[static restrict 1], const uint32_t my_s[static restrict 1], wpqb_info w[static restrict 1], const wpqb_info my_w[static restrict 1]) { if (!(my_w->w <= w->w) || ((my_w->w == w->w) && (my_w->i.f < w->i.f))) { for (uint16_t i = UINT16_C(0); i < my_w->i.s; ++i) s[i] = my_s[i] + my_w->i.f; *w = *my_w; } } static void wpqb_ncpt(const uint16_t n, const uint32_t n_a, wpqb a[static 1], const uint32_t f, const uint16_t n_s, uint32_t s[static 1], wpqb_info w[static 1]) { uint32_t my_s[n_s]; wpqb_info my_w; wpqb_ncp0(n, n_a, a, n_s, my_s, &my_w); if (my_w.i.s) { my_w.i.f = f; #pragma omp critical wpqb_update(s, my_s, w, &my_w); } } static void wpqb_ncp1(const uint16_t n, const uint32_t n_a, wpqb a[static 1], const uint16_t n_s, uint32_t s[static 1], wpqb_info w[static 1]) { w->w = qNaN(); w->i.s = UINT16_C(0); w->i.f = n_a; if (!n) return; if (!n_s) return; if (!n_a) return; #pragma omp parallel for default(none) shared(n,n_a,a,n_s,s,w) schedule(nonmonotonic:dynamic,1) for (uint32_t i = 0u; i < n_a; ++i) wpqb_ncpt(n, n_a - i, a + i, i, n_s, s, w); } #endif /* _OPENMP */ void wpqb_ncp(const uint16_t n, const uint32_t n_a, wpqb a[static 1], const uint16_t n_s, uint32_t s[static 1], wpqb_info w[static 1]) { w->w = qNaN(); w->i.s = UINT16_C(0); w->i.f = 0u; #ifdef _OPENMP wpqb_sort1(n_a, a); #else /* !_OPENMP */ wpqb_sort0(n_a, a); #endif /* ?_OPENMP */ for (w->i.f = n_a; w->i.f; ) { --(w->i.f); if (a[w->i.f].w >= -LDBL_MAX) { ++(w->i.f); break; } } if (!(w->i.f)) return; #ifdef _OPENMP wpqb_ncp1(n, w->i.f, a, n_s, s, w); #else /* !_OPENMP */ wpqb_ncp0(n, w->i.f, a, n_s, s, w); #endif /* ?_OPENMP */ }
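/*
 * For reference only - a minimal, self-contained sketch of the reduction pattern
 * used by wpqb_ncp1()/wpqb_ncpt()/wpqb_update() above: every iteration computes a
 * private candidate and merges it into the shared best result inside a critical
 * section. The function name is hypothetical and it assumes <float.h> for
 * DBL_MAX; kept out of the build with #if 0.
 */
#if 0
static double best_of(const uint32_t n, const double x[static 1])
{
  double best = -DBL_MAX;
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
#endif
  for (uint32_t i = 0u; i < n; ++i) {
    /* stands in for the per-suffix result that wpqb_ncp0() computes */
    const double cand = x[i];
#ifdef _OPENMP
#pragma omp critical
#endif
    if (cand > best)
      best = cand;
  }
  return best;
}
#endif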
sc35_fixedRepulse.c
/*MODIFICATIONS:
  when wl=3 (rotation of the 0th particle) it is allowed to move
  We are trying to remove a bug where a particle occasionally gets stuck when wl=1.
  It is apparently caused by the motion of the center of mass: because of the
  accumulation of numerical errors, the center of mass of the system is recomputed
  from time to time, and that recomputation uses periodic boundary conditions,
  which can spoil everything, because a particle that has flown into the other box
  shifts the center of mass when it is mapped back into the primary box (-z/2 to z/2).
  We are testing this: we removed the recomputation of the center of mass that had
  been done out of concern about accumulated numerical errors.
  Print "F I N I S H E D" to stdout once the wl conditions are met.
*/
/*TODO in future
  - Non-equilibrium candidate moves
  - check scaling of particles of different sizes - should scale with contact area!
  - cell list - divide the simulation box into cells where particles interact with
    each other, while outside the interaction is definitely 0 - saves time and gives
    better scaling with system size; possibly long spherocylinders could be in
    several cells to keep good scaling
  - better cluster algorithm - put in Wang-Landau
  - cluster list works for spherocylinders only now
*/
/*------------------------------------------------------------------------------
  Version 3.5
  - linear bond at spherocylinders, where the second spherocylinder is harmonically
    attached to a point that is at a distance of bondlength from the first
    spherocylinder and follows the direction of the spherocylinder
  - bonded particles belong to the same cluster
  - print energy at statistical reports
  - have particles of different lengths
  - interaction scaling back to v1+v2 (no addition of 1.0) - more physical
*/
/*------------------------------------------------------------------------------
  Version 3.4
  - New handling of the option file
  - reaction coordinate radius around the z axis for pore calculations
  - reaction coordinate as number of particles in contact (defined by distance of CMs)
  - 2D Wang-Landau method
  - New Wang-Landau coordinate - pore radius in a vesicle around the beginning of
    the xy plane
  - New models TPSC, TCPSC, TCHPSC, TCHCPSC - models with two patches;
    note that the switch function on the sides of a patch is linear in the cosine
    of the angle, not in the angle itself; as a result two patches with overlapping
    sides do not compensate easily to a flat profile
  - FIX chirality was doubled (angle twice as large)
  - Added possibility of excluded interactions [EXCLUDE] in the topology file
  - MPI replica exchange with different temperatures and pressures (paraltemp,
    paralpress); the input configuration is #{number of process}config.init; if it
    does not exist, config.init is used; each replica gets a different random
    seed = seed+mpirank
  - config.init can look like a movie snapshot
  - MPI exchange with Wang-Landau
  - added angular interaction between neighbouring spherocylinders (in a chain);
    angle1 is the angle between sc directions and angle2 is the angle between the patches
*/
/*-------------------------------------------------------------------------------
  Version 3.3
  - an external potential can be added as a part of the topology - it can be a
    hard or an attractive wall
*/
/**
 * Changes made by Noah S. Bieler and Robert Vacha:
 *
 * New version 3.2
 *
 * - The length now has to be specified in the topology file, but the lengths are
 *   not allowed to differ from each other. The option file shall no longer contain
 *   a length option.
 * - The particles can now switch their type based on the chemical potential
 *   delta_mu (= energy difference from state 2 to state 1).
 * - For that a new option was introduced: average attempts per sweep to switch
 *   a type.
 * - A lot of variables are now combined in either topo, sim or conf.
 *   The rule should be:
 *   > topo: Everything that belongs to the topology and that should not change
 *     during the run.
 *   > sim: Options and everything that has to do with the simulation. (Maybe the
 *     current target and such should be saved in there as well.)
 *   > conf: What changes every step concerning the particles and the box, or
 *     in other words: what has been read from conf.init.
 * - added a cluster determining routine => sim->clusterlist + sim->clusters
 * - added macros for TRUE and FALSE
 * - added an option for the random seed
 * - basic neighbour list implemented
 * - new types: chiral CPSC (CHCPSC) and chiral PSC (CHPSC) and their interactions
 */
/*--------------------------------------------------------------------------------
sc31.c
Patchy Spherocylinder Version 3.1

Wang-Landau method of free energy calculations. It is set in the options file as:
0 = none, 1 = z-distance of the 1st particle from the system CM,
2 = hole in the xy plane of SCA = membrane hole.
It reads the file wl.dat and writes wl-new at the end. The first line holds the
value of alpha; then there are three columns:
1 - order parameter, 2 - weights, 3 - histogram

The interaction of spherocylinders is scaled based on the volume of the attractive
patch; the unit of 1.0 is two spheres of diameter sigma = 1.0 attracting each
other by 1.0. Using this in interactions among lipids and spherocylinders should
be consistent.

The start-up configuration file "config.init" now has the box size on its first line.

(I tested performance: compilation with optimization -O2 gives a 10% speed up;
the rest has a negligible effect, including the use of static arrays instead of
dynamic ones. Most of the time is consumed by the paire function.
 6,519,638,177 :simulate
 6,492,411,300 :energyone
 5,705,685,593 :paire
   542,561,887 :bondenergy
   489,463,361 :eattractive11
   450,443,970 :image
   115,126,519 :erepulsive
)
*/
/* --------------------------------------------------------------------------------
sc3.c
Patchy Spherocylinder Version 3.0

Beads were added to the particle list:
 bead(10) - repulsive
 bead(11) - isotropically attractive
- It is necessary to also provide a topology file (top.init)
- Particles are placed in chains according to the topology order, including connections
- Particle arrays are now allocated dynamically on the heap
- Displacement and rotation are optimized for highest RMSD performance
- NPT ensemble with isotropic and anisotropic couplings; in pressure moves all
  particles are rescaled with their center (chains are not rescaled with CM)
  0 - anisotropic coupling, 1 - isotropic coupling, 2 - isotropic in xy, z = const

bead types and their interactions:
repulsive(10) - purely repulsive sphere with WCA potential on the closest distance
                parameters: patch repulsion sigma - defines where repulsion reaches zero
isotropic(11) - isotropic cos^2 potential acting isotropically, dependent only on
                the closest distance between objects.
                Parameters: distance of attractivity (should be at least
                sigma*2^(1/6)), which defines how far the attraction stays constant
                at -e. After this distance follows the switch length, over which
                the attraction goes to zero as cos^2. The rest is as in the
                repulsive model.

sc2.c
Patchy Spherocylinder Version 2.0

It is possible to make chains of spherocylinders that are connected through
hemispherical caps by a harmonic bond. There are two parameters: the equilibrium
distance and the strength of the harmonic spring. Note that units are in 1 kT/e,
so the MC strength of the bond changes with the temperature parameter.

Patchy Spherocylinder Version 1.0

Includes different types of possible interactions:
repulsive(0) - purely repulsive spherocylinder with WCA potential on the closest
               distance.
               Parameters: patch repulsion sigma - defines where repulsion reaches zero.
isotropic(1) - isotropic cos^2 potential acting isotropically, dependent only on
               the closest distance between spherocylinders.
               Parameters: distance of patch; the interaction distance of the patch
               (should be at least sigma*2^(1/6)) defines how far the attraction
               stays constant at -e. After this distance follows the switch length,
               over which the attraction goes to zero as cos^2. The rest is as in
               the repulsive model.
patchy(2) - Attractive potential limited to an angular wedge on the spherocylinder.
            The patch goes all the way through, also making the hemispherical caps
            on the ends attractive.
            Parameters: the angular part has a parameter defining its size,
            "Angular size of patch (degrees)", and the width of the switch
            function, "Angular switch off of patch (degrees)", over which the
            attraction reaches zero - it is a linear function. The rest is as in
            the isotropic model.
cylindrical(3) - Attractive potential limited to an angular wedge on the
            cylindrical part of the spherocylinders. The hemispherical caps on the
            ends are repulsive. The rest is as in the patchy model.

Note: internally, particles are numbered from 0. There is a preallocated particle
array of size MAXN because in the future there may be a grand canonical ensemble
in which the number of particles varies.

This follows the hard-wall spherocylinder MC, version 7, by Mark Miller -
description below:

sc.c

Version 1

Performs a basic constant-volume MC simulation of hard spherocylinders with rigid
cuboidal boundary conditions.

Run parameters are read in from the file "options". The template for this file
appears at the end of the code. The values must be inserted before the colons.

The initial configuration is read from the file "config.init". The first line
contains the size of the box. The format of the file is nine columns: three for
the positions, three for the direction vector, and three for the direction of the
patch. The direction vectors are normalised after being read in. The configuration
is checked for particle overlaps.

The unit of length is taken as the spherocylinder diameter. Hence the ratio L/D is
equal to the length of the cylinder.

Order parameters for nematic and smectic order are evaluated. The nematic order
parameter is related to the coefficient of the quadratic term in the Legendre
expansion of the orientational distribution function. Any smectic order is assumed
to be directed along the z axis, and is detected by the coefficients of the
Fourier expansion of the position distribution function.

MM 12.vii.01

..................................................................................

Version 2

The aspect ratio of the box may now fluctuate, keeping the volume constant. Two
new parameters are required in the options file to specify the average number of
attempted shape changes per sweep, and the initial maximum trial change in a box
dimension.

Shape changes are made by picking one of the three box lengths at random, making a
random change, evenly distributed between plus and minus a finite interval,
choosing a second direction and doing the same, then determining the new length in
the remaining direction from the condition of constant volume.

The step-size equilibration period is now split into three parts: displacement,
rotation, and shape change.

The most important change to the code is that the particle coordinates are now
stored as fractions of the box dimensions. However, input and output
configurations are still communicated in units of the cylinder diameter, D = 1.

Note that the displacement maximum step size is now specified as a fraction of the
box length, not as an absolute distance.
MM 18.vii.01 .................................................................................. Version 3 Constant pressure MC. The volume may fluctuate. Volume changes are attempted by altering just one box length at a time, chosen at random. The running average of the density is calculated and reported. MM 24.vii.01 .................................................................................. Version 7 The composite translation-plus-rotation moves have been split into separate move types, each of which is attempted with equal probability. This enables acceptance ratios to be accumulated separately for these degrees of freedom, so that maximum step sizes can be adjusted more sensibly. A few other things have been tidied up, such as defining structures for the book-keeping of statistics and acceptance ratios. MM 9.v.02 --------------------------------------------------------------------------------*/ #ifndef _GNU_SOURCE # define _GNU_SOURCE #endif #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #ifdef MACOS # include "getline.h" #endif #ifdef MPI # include <mpi.h> #endif /* Macros for DEBUG messages */ #ifdef DEBUGGING_INIT #define DEBUG_INIT(...) fprintf(stderr, "DB in INIT: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG_INIT(...) #endif #ifdef DEBUGGING_SIM #define DEBUG_SIM(...) fprintf(stderr, "DB in SIM: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG_SIM(...) #endif #ifdef DEBUGGING #define DEBUG(...) fprintf(stderr, "DB: "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n"); fflush(stderr); #else #define DEBUG(...) #endif /* End of DEBUG macros */ /* With pairlist ? */ #define WITH_PAIRLIST /* Boolean Macros */ #define BOOL int #define TRUE 1 #define FALSE 0 /* End of Boolean Macros */ #define MAXF 20 /* Maximum number of Fourier terms */ #define MAXN 14000 /* Maximum number of particles */ #define MAXCHL 10 /* Maximum length of chain */ #define ZEROTOL 1.0e-12 /* Dot products below ZEROTOL are deemed zero */ #define ZEROTOL2 1.0e-8 /* numbers below ZEROTOL are deemed zero */ #define PI 3.141592653589793238462643383279 /* pi */ #define PIH 1.57079632679489661923132169163975 /* pi half*/ /*Particle types*/ #define SC 10 /*spherocylinder*/ #define SCN SC+0 /*spherocylinder non-attractive*/ #define SCA SC+1 /*spherocylinder isotropicaly attractive*/ #define PSC SC+2 /*spherocylinder with patchy attraction*/ #define CPSC SC+3 /*spherocylinder with cylindrical patchy attraction*/ #define CHPSC SC+4 /* chiral psc */ #define CHCPSC SC+5 /* chiral cpsc */ #define TPSC SC+6 /*spherocylinder with two patches*/ #define TCPSC SC+7 /*spherocylinder with two cylindrical patches*/ #define TCHPSC SC+8 /* chiral 2psc */ #define TCHCPSC SC+9 /* chiral 2cpsc */ #define SP 30 /*sphere - should be over all apherocylinders*/ #define SPN SP+0 /* sphere non-attractive*/ #define SPA SP+1 /* spherocylinder isotropicaly attractive*/ #define MAXT 30 /* Maximum number of types we have*/ #define MAXMT 100 /* Maximum number of molecular types */ /*Reading topology*/ #define SMSTR 64 /* Small string length */ #define STRLEN 400 /* maximum length of line*/ #define CONTINUE '\\' /* symbol for line continue*/ #define COMMENTSIGN '#' /* symbol for comment*/ #define OPENKEY '[' /* starting sign for keyword*/ #define CLOSEKEY ']' /* ending sign for keyword*/ #define SEPARATOR ':' /* sign for separator*/ #define OPENMOL '{' /* starting sign for molecules*/ #define CLOSEMOL '}' 
/* ending sign for molecules*/
#define BOXSEP 'x'             /* extraction of box*/

/* Wang-Landau method */
#define WL_GERR 0.0001         /* Max roughness in histogram */
#define WL_ALPHATOL 0.0000001  /* Convergence criterion for detailed balance */
#define WL_MINHIST 1000        /* Minimum histogram sampling for considering roughness */
#define WL_ZERO 0.000000000000 /* Zero for histogram with current weights*/
#define WL_CONTACTS 36.0       /* Square distance under which particles are in contact */

/* Math */
#define DOT(a,b) ((a).x * (b).x + (a).y * (b).y + (a).z * (b).z) /* Dot product */
#define AVER(a,b) (((a)+(b))*0.5)                                /* Arithmetic average*/
/* Note: the trailing semicolon is kept for compatibility - use ROUND() only as a
   full statement's right-hand side, never inside a larger expression. */
#define ROUND(a) ((a) > 0.0 ? floor((a) + 0.5) : ceil((a) - 0.5)); /* Round double*/
#define PMONE(a) (1 - 2 * (a))                                   /* Takes 1 or 0, returns +-1 */
/* Acceptance ratio */
#define RATIO(a) ( ((a).acc+(a).rej) > 0 ? 1.0*(a).acc/((a).acc+(a).rej) : 0.0 )
#define INBOX(a,b) ( (a) > 0 ? modf((a),&(b)) : modf((a),&(b))+1 )

/*................................................................
Structure definitions */

struct vector {  /* Define a 3D vector structure */
	double x;
	double y;
	double z;
};

struct quat {    /* Define a quaternion structure */
	double w;
	double x;
	double y;
	double z;
};

struct particles {             /* Define a particle */
	struct vector pos;           /* Position vector */
	struct vector dir;           /* Unit direction vector of axis */
	struct vector patchdir[2];   /* Vector defining orientation of patch */
	struct vector patchsides[4]; /* Vector defining sides of patch */
	struct vector chdir[2];      /* Direction for chirality - keep in memory to increase speed */
	long chaint;                 /* Chain type*/
	long chainn;                 /* Chain number*/
	int type;                    /* Type of the particle */
	int switchtype;              /* With which kind of particle do you want to switch?*/
	double delta_mu;             /* Chemical potential for the switch */
	int switched;                /* 0: in initial state; 1: in the switched state */
};

struct ia_param{           /* Contains properties and parameters of particle types */
	char name[SMSTR];        /* The name of the particle type */
	char other_name[SMSTR];  /* The name of the other particle type */
	int geotype[2];          /* The geometrical type: spherocylinder (0-repulsive, 1-isotropic, 2-patchy, 3-cylindrical) or sphere (0-repulsive, 1-isotropic) */
	double sigma;            /* Repulsion WCA*/
	double epsilon;          /* Repulsion strength*/
	double pdis;             /* Interaction distance of patch */
	double pswitch;          /* Switch of distance of patch */
	double pangl[4];         /* angular size of patch as was specified in input */
	double panglsw[4];       /* angular size of patch switch as was specified in input */
	double pcangl[4];        /* cosine of half size angle - rotation from patch direction to side */
	double pcanglsw[4];      /* cosine of half size angle plus switch - rotation from patch direction to side */
	double rcut;             /* Cutoff for attraction */
	double rcutwca;          /* Cutoff for repulsion*/
	double pcoshalfi[4];     /* Cosine of half angle going to side of interaction */
	double psinhalfi[4];     /* Sine of half angle going to side of interaction - useful for quaternion rotation */
	double csecpatchrot[2];  /* Cosine of rotation of second patches in 2psc models*/
	double ssecpatchrot[2];  /* Sine of rotation of second patches in 2psc models*/
	double volume;           /* Volume of particle for geometrical center calculations*/
	double pvolscale;        /* Scale of patch volume size*/
	double len[2];           /* Length of the PSC */
	double half_len[2];      /* Half length of the PSC */
	double chiral_cos[2];    /* Contains the cosine for the chiral rotation of the patch */
	double chiral_sin[2];    /* Contains the sine for the chiral rotation of the patch */
};

struct interacts {         /* Parameters passed to functions of interaction calculation */
	double dist;             /* closest distance */
	struct vector distvec;   /* vector of closest distance */
	struct particles * part1; /* particle 1 */
	struct particles * part2; /* particle 2 */
	struct vector box;       /* box size */
	struct ia_param * param; /* interaction parameters */
	struct vector r_cm;      /* vector connecting centers of mass */
	double distcm;           /* distance between centers of mass */
	double dotrcm;           /* square size of r_cm*/
	double contt;            /* closest point on spherocylinder to sphere */
};

struct chainparams { /*Parameters for inner interaction in chains*/
	double bond1eq;    /* Equilibrium distance of harmonic bond between nearest neighbours*/
	double bond1c;     /* Spring constant for harmonic bond between nearest neighbours*/
	double bond2eq;    /* Equilibrium distance of harmonic bond between second nearest neighbours*/
	double bond2c;     /* Spring constant for harmonic bond between second nearest neighbours*/
	double bonddeq;    /* Equilibrium distance of directional harmonic bond between the nearest neighbours*/
	double bonddc;     /* Spring constant for directional harmonic bond between the nearest neighbours*/
	double angle1eq;   /* Equilibrium angle between two spherocylinders - nearest neighbours*/
	double angle1c;    /* Spring constant for angle between two spherocylinders - nearest neighbours*/
	double angle2eq;   /* Equilibrium angle between two spherocylinder patches - nearest neighbours*/
	double angle2c;    /* Spring constant for angle between two spherocylinder patches - nearest neighbours*/
};

struct molecule {    /* This structure is for io only */
	char * name;       /* The name of the molecule */
	long * type;       /* The type of the particle */
	long * switchtype; /* The switchtype of the particle */
	double * delta_mu; /* The chemical potential for the switch */
};

struct disp {      /* Define step size and acceptance ratio statistics */
	double mx;       /* Maximum value: displacement, cos(angle), etc. */
	double angle;    /* Maximum angle, since cos(angle) is saved in .mx */
	long acc;        /* Number of accepted steps */
	long rej;        /* Number of rejected steps */
	double oldrmsd;  /* Averaged mx value in previous equilibration round */
	double oldmx;    /* Change in mx in last equilibration step */
};

struct stat {      /* Define statistics counters */
	double sum;
	double sum2;
	long samples;
	double mean;
	double rms;
};

struct meshs {     /* Mesh for hole order parameter */
	int dim[2];      /* Mesh dimensions */
	int *data;       /* Mesh data */
	int *tmp;        /* temporary list for hole search */
};

struct wls {             /* Wang-Landau method (wl) */
	double *weights;       /* Array of weights for wl method */
	long *hist;            /* Array of histogram for wl method */
	long length[2];        /* Length of above arrays */
	double dorder[2];      /* Increments of order parameter */
	double minorder[2];    /* Minimum order parameter */
	double alpha;          /* Current modifier of weights */
	long currorder[2];     /* Value of current order parameter*/
	long neworder[2];      /* wl order parameter in new step */
	long max;              /* wl maximum of histogram */
	long min;              /* wl minimum of histogram */
	double wmin;           /* weights minimum */
	int wlmdim;            /* Dimensionality of Wang-Landau */
	int wlmtype;           /* Atom type for the Wang-Landau method (wl) */
	double wl_meshsize;    /* Size of mesh bin for hole order parameter*/
	struct meshs mesh;     /* Mesh for hole order */
	struct meshs origmesh; /* Mesh store for rejected moves */
	long * radiushole;     /* Array for hole radius around origin */
	long * radiusholeold;  /* Array for hole radius around origin - big move */
	long radiusholemax;    /* Size of array for hole radius*/
	long partincontact;    /* Number of particles in contact */
	long partincontactold; /* Number of particles in contact - old for move*/
};

struct pairs{      /* The structure holding the particle numbers of the pairs and the number of pairs */
	long num_pairs;  /* The number of pairs */
	long * pairs;    /* The particle numbers of the pairs */
};

struct pairlist{   /* Note: this may be overly complicated - just sim->pairs[npart] should be enough */
	struct pairs * list;  /* contains the pairlist of all particles */
};

struct cluster{    /* contains all the particles of one cluster */
	long npart;
	long * particles;
};

struct exters{
	BOOL exist;        /* existence of external potential*/
	double thickness;  /* external wall thickness*/
	double epsilon;    /* depth of attraction*/
	double attraction; /* distance of attraction*/
	double sqmaxcut;   /* distance beyond which nothing can interact*/
	struct ia_param interactions[MAXT];  /* Interaction parameters with particle types generated from above params*/
};

struct topo{             /* It would be nice if this struct contained all the topo stuff in the end*/
	long * switchlist;     /* List containing the number of all the particles with switchtypes */
	long n_switch_part;    /* number of particles with switchtype */
	double sqmaxcut;       /* square of distance over which even spherocylinders cannot interact (distance between CM) */
	double maxcut;         /* distance over which even spherocylinders cannot interact (distance between CM) */
	long conlist[MAXN][4]; /* Connectivity list; we have connections to tail and head and second neighbours so far*/
	long chainlist[MAXN][MAXCHL];  /* List of chains*/
	long chainnum;         /* Number of chains */
	struct chainparams chainparam[MAXMT];  /* parameters for chains */
	struct ia_param ia_params[MAXT][MAXT]; /* parametrization of particles for all interactions*/
	long npart;            /* Number of particles */
	struct exters exter;   /* external potential - wall */
};

struct sim{              /* Should contain mostly all the simulation options and variables that can change in every step. */
	double press;          /* Pressure */
	double paralpress;     /* Parallel pressure for replica exchange*/
	double dpress;         /* Pressure change for replica exchange*/
	double shave;          /* Average number of volume changes to attempt per sweep */
	double shprob;         /* Probability of attempting a volume change */
	double chainprob;      /* Average number of chain move attempts per sweep */
	double switchprob;     /* Average number of type switch attempts per sweep */
	int pairlist_update;   /* Number of sweeps between pairlist updates */
	double temper;         /* Temperature*/
	double paraltemper;    /* Temperature for parallel tempering */
	double dtemp;          /* Temperature step */
	int ptype;             /* Type of pressure coupling*/
	long adjust;           /* Number of sweeps between step size adjustments */
	long movie;            /* Number of sweeps between movie frames */
	long nequil;           /* Number of equilibration sweeps */
	long nsweeps;          /* Number of production sweeps */
	long paramfrq;         /* Number of sweeps between order parameter samples */
	long report;           /* Number of sweeps between statistics reports */
//	long terms;            /* Number of Fourier terms as smectic order parameters */
	long nrepchange;       /* Number of sweeps between replica exchanges */
	int wlm[2];            /* Wang-Landau method (wl) */
	struct disp edge;      /* Maximum box length change and statistics */
	struct disp rot[MAXT];     /* Maximum rotation and statistics */
	struct disp trans[MAXT];   /* Maximum translation and statistics*/
	struct disp chainm[MAXMT]; /* Maximum translation for chain and statistics*/
	struct disp chainr[MAXMT]; /* Maximum rotation for chain and statistics */
	struct disp mpiexch;   /* MPI statistics*/
	struct pairs * pairlist;   /* The pairlist */
	long write_cluster;    /* Number of sweeps per writing out cluster info */
	long * clusterlist;    /* clusterlist[i] = cluster index of particle i */
	struct cluster * clusters; /* information about the single clusters */
	double *clustersenergy;    /* list of energies of clusters*/
	long num_cluster;      /* number of single clusters */
	long * clusterstat;    /* Statistics about the size of clusters */
	long max_clust;        /* maximal cluster size */
	struct wls wl;         /* Wang-Landau data */
	int mpirank;           /* MPI number for given process*/
	int mpinprocs;         /* MPI number of processes */
};

typedef enum {   /* Holds the type of a variable in struct option */
	Int,
	Int2,
	Long,
	Double
} Type;

typedef struct { /* for reading in the options */
	char *id;      /* The name of the value in the option file*/
	Type type;     /* The type (int, double or long) */
	BOOL set;      /* Whether the variable has been set */
	void *var;     /* The variable */
} Option;

struct conf{                    /* Configuration of the system*/
	struct particles * particle;  /* All particles*/
	struct vector box;            /* Box size*/
	double sysvolume;             /* Something like total mass*/
	struct vector syscm;          /* System center of mass*/
};

struct filenames {
	/* input files */
	char configurationinfile[30];
	char topologyfile[30];
	char optionsfile[30];
	char wlinfile[30];
	/* output files */
	char configurationoutfile[30];
	char moviefile[30];
	char wloutfile[30];
	char statfile[30];
	char clusterfile[30];
	char clusterstatfile[30];
	char energyfile[30];
};

struct mpiexchangedata{  /* extra type for mpi communication*/
	struct vector box;     /* box of configuration */
	double energy;         /* energy of configuration */
	double volume;         /* volume of configuration */
	int accepted;          /* bool if accepted */
	struct vector syscm;   /* system CM of configuration */
	long radiusholemax;    /* size of array for WL*/
	long wl_order[2];      /* wang-landau order parameter*/
};

#ifdef MPI
MPI_Datatype MPI_vector,
MPI_Particle, MPI_exchange; #endif const struct stat nullstat = {0.0, 0.0, 0, 0.0, 0.0}; long seed = 6; /* Seed for random number generator */ /*..............................................................................*/ int main(int argc, char **argv) { DEBUG("start"); FILE *outfile,*mov; /* Handle for writing configuration */ double (* intfce[MAXT][MAXT])(struct interacts *); /*array of interaction functions*/ struct topo topo; /* will maybe contain all the topo stuff in future */ struct sim sim; /* Should contain the simulation options. */ struct conf conf; /* Should contain fast changing particle and box(?) information */ struct filenames files; int memoryalloc(struct conf * conf); int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim); void read_options(struct sim* sim, char filename[30]); void init_top(struct topo *, struct conf * conf, struct sim * sim, char filename[30]); void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]); void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo); void draw(FILE *, struct conf * conf, struct topo * topo); void printeqstat(struct disp *, double, int); void simulate(long nsweeps, long adjust, long paramfrq, long report, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files); void init_pairlist(struct topo * topo, struct sim * sim); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo); int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) ); int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf); int print_clusters(FILE * stream, BOOL decor, struct sim * sim); int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim); int sort_clusterlist(struct topo * topo, struct sim * sim); printf ("\nPatchy Spherocylinders version 3.5 "); sprintf(files.configurationinfile, "config.init"); sprintf(files.configurationoutfile, "config.last"); sprintf(files.optionsfile, "options"); sprintf(files.topologyfile, "top.init"); sprintf(files.moviefile, "movie"); sprintf(files.wlinfile, "wl.dat"); sprintf(files.wloutfile, "wl-new.dat"); sprintf(files.statfile, "stat.dat"); sprintf(files.clusterfile, "cluster.dat"); sprintf(files.clusterstatfile, "cluster_stat.dat"); sprintf(files.energyfile, "energy.dat"); #ifdef MPI FILE *infile; printf(" MPI version"); MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD, &(sim.mpinprocs) ); MPI_Comm_rank(MPI_COMM_WORLD, &(sim.mpirank) ); sprintf(files.configurationoutfile, "%dconfig.last", sim.mpirank); sprintf(files.moviefile, "%dmovie", sim.mpirank); sprintf(files.wloutfile, "%dwl-new.dat", sim.mpirank); sprintf(files.clusterfile, "%dcluster.dat", sim.mpirank); sprintf(files.clusterstatfile, "%dcluster_stat.dat", sim.mpirank); sprintf(files.energyfile, "%denergy.dat", sim.mpirank); sprintf(files.statfile, "%dstat.dat", sim.mpirank); /*test if there is a specific input configuration for mpi run*/ sprintf(files.configurationinfile, "%dconfig.init", sim.mpirank); infile = fopen(files.configurationinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.configurationinfile, "config.init"); /*test if there is a specific input wang-landau for mpi run*/ sprintf(files.wlinfile, "%dwl.dat", sim.mpirank); infile = 
fopen(files.wlinfile, "r"); if (infile != NULL) fclose (infile); else sprintf(files.wlinfile, "wl.dat"); #endif printf ("\n-------------------------------------\n"); printf ("Reading options...\n"); read_options(&sim,files.optionsfile); init_top(&topo, &conf, &sim,files.topologyfile); if (topo.chainnum ==0) { /*no chain make the probability of moving them 0*/ if (sim.chainprob > 0) printf ("No chains... chain move probability set to 0.\n"); sim.chainprob = 0; } printf ("\nReading configuration...\n"); init_config(&topo, &conf, &sim, files.configurationinfile); printf ("Equilibration of maximum step sizes: %ld sweeps\n", sim.nequil/2); fflush (stdout); if ( sim.wlm[0] > 0 ) { outfile = fopen(files.wlinfile, "r"); if (outfile == NULL) { printf ("ERROR: Cannot open file for Wang-Landau method (%s).\n",files.wlinfile); memorydealloc(&conf, &topo, &sim); exit(1); } fclose (outfile); } /* Empty movie file */ mov = fopen("movie", "w"); fclose (mov); printf ("\nInitializing energy functions...\n"); init_intfce(intfce, &topo); if (sim.pairlist_update) { init_pairlist(&topo, &sim); } if (sim.nequil) { printf("\nStart equilibration...\n"); simulate(sim.nequil/2, sim.adjust, 0, 0, intfce, &topo, &sim, &conf,&files); simulate(sim.nequil/2, 0, 0, 0, intfce, &topo, &sim, &conf,&files); printf (" Equilibrated maximum displacement / acceptance ratio: \n"); printeqstat(sim.trans,2.0,MAXT); printf (" Equilibrated maximum rotation / acceptance ratio: \n"); printeqstat(sim.rot,1.0,MAXT); printf (" Equilibrated maximum box length change / acceptance ratio: \n"); printf (" %.6le / %.6le\n", sim.edge.mx/2.0,RATIO(sim.edge)); printf (" Equilibrated maximum displacement of chain / acceptance ratio: \n"); printeqstat(sim.chainm,2.0,MAXMT); printf (" Equilibrated maximum rotation of chain / acceptance ratio: \n"); printeqstat(sim.chainr,1.0,MAXMT); printf ("\n"); printf ("Further equilibration of configuration: %ld sweeps\n", sim.nequil/2); fflush (stdout); outfile = fopen("config.eq", "w"); fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z); draw (outfile, &conf, &topo); fclose (outfile); printf (" Equilibrated configuration written to config.eq\n"); printf (" Box dimensions: %.10lf, %.10lf, %.10lf\n\n", conf.box.x, conf.box.y, conf.box.z); } printf ("Production run: %ld sweeps\n\n", sim.nsweeps); fflush (stdout); simulate(sim.nsweeps, 0, sim.paramfrq, sim.report, intfce, &topo, &sim, &conf,&files); #ifdef MPI printf (" MPI replica changeT / changeP / acceptance ratio: \t %.6lf / %.6lf / %.6lf\n\n", sim.mpiexch.mx,sim.mpiexch.angle,RATIO(sim.mpiexch)); #endif outfile = fopen(files.configurationoutfile, "w"); fprintf (outfile, "%15.8le %15.8le %15.8le\n", conf.box.x, conf.box.y, conf.box.z); draw (outfile, &conf, &topo); fclose (outfile); // For testing the pairlist //gen_pairlist(&topo, &sim, &conf); //FILE * fpairlist; //fpairlist = fopen("pairlist.dat", "w"); //print_pairlist(fpairlist, &sim, &topo); //fclose(fpairlist); //printf("sqmaxcut = %lf\n", topo.sqmaxcut); //// For testing the cluster algorithm //gen_clusterlist(&topo, &sim, &conf); //print_clusterlist(stdout, TRUE, &topo, &sim, &conf); //sort_clusterlist(&topo, &sim); //print_clusters(stdout, TRUE, &sim); //print_clusterstat(stdout, TRUE, &sim); if (memorydealloc(&conf, &topo, &sim)) exit(1); #ifdef MPI MPI_Finalize(); #endif printf ("\nDone\n\n"); return 0; } /*..............................................................................*/ /*.........................SIMULATION RUN.......................................*/ 
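/*
 * For reference only - a minimal 1D sketch (hypothetical name, simplified
 * arguments) of the Wang-Landau housekeeping done inside simulate() below:
 * once every bin has enough samples and the histogram is flat enough, alpha is
 * halved, the histogram is reset and the weights are shifted (by the first
 * bin's weight, as simulate() does). Returns 1 once alpha has converged below
 * WL_ALPHATOL. Kept out of the build with #if 0.
 */
#if 0
static int wl_flat_update(long length, long hist[], double weights[],
                          double *alpha, double temper)
{
	long i, min = hist[0], max = hist[0];
	double wshift;

	for (i = 1; i < length; i++) {
		if (hist[i] > max) max = hist[i];
		if (hist[i] < min) min = hist[i];
	}
	if (min <= WL_MINHIST)
		return 0;                  /* not enough sampling yet */
	if (temper * log((double)max / (double)min) >= WL_GERR)
		return 0;                  /* histogram not flat enough yet */
	if (*alpha < WL_ALPHATOL)
		return 1;                  /* converged: caller prints F I N I S H E D */
	*alpha /= 2.0;
	wshift = weights[0];
	for (i = 0; i < length; i++) { /* reset histogram, renormalize weights */
		hist[i] = 0;
		weights[i] -= wshift;
	}
	return 0;
}
#endif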
/*..............................................................................*/ void simulate(long nsweeps, long adjust, long paramfrq, long report, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct sim * sim, struct conf * conf, struct filenames *files) { long i,j,wli; long next_adjust; /* Next sweep number for step size adjustment */ long next_calc; /* Next sweep number for order parameter calculation */ long next_dump; /* Next sweep number for reporting statistics */ long next_frame; /* Next sweep number for dumping a movie frame */ long step; /* Step number within a given sweep */ long sweep; /* Current sweep number */ //struct stat nem; /* Nematic order parameter */ //struct stat vol; /* Volume statistics */ //struct stat shapex, shapey, shapez; /* Box shape statistics */ //struct stat smec[MAXF]; /* Smectic order parameters (Fourier coeeficients) */ FILE *mf; /* Handle for movie file */ FILE *cl_stat, *cl, *cl_list; /* Handle for cluster statistics */ FILE *ef, *statf; /* Handle for energy file and statistical file*/ double edriftstart; /* Energy drift calculation - start */ double edriftchanges; /* Energy drift calculation - accumulate all changes through moves */ double edriftend; /* Energy drift calculation - end */ double pvdriftstart; /* PV drift calculation - start */ double pvdriftend; /* PV drift calculation - end */ double volume; /* volume of box*/ double moveprobab; /* random number selecting the move*/ /* function declarations */ //double nematic(long, struct particles *); double ran2(long *); //double smectic(long, struct particles *, long); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); void accumulate(struct stat *, double); void draw(FILE *, struct conf * conf, struct topo * topo); void optimizestep(struct disp *, double, double); void optimizerot(struct disp *, double, double); void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf ); int wlinit(struct wls *, char filename[30]); int wlwrite(struct wls *, char filename[30]); int wlend(struct wls *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_end(struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf * conf,int); void mesh_print (struct meshs *); void masscenter(long, struct ia_param [MAXT][MAXT], struct conf * conf); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double particlemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)); double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long 
radiushole_position(double, struct sim *,int); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); double alignment_order(struct conf * conf, struct topo * topo); int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim); double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf); /* Opening files for cluster statistics */ cl_stat = cl = cl_list = ef = statf = NULL; if(sim->write_cluster){ // Empty file cl_stat = fopen(files->clusterstatfile, "w"); fclose(cl_stat); cl_stat = fopen(files->clusterstatfile, "a"); // Empty file cl = fopen(files->clusterfile, "w"); fclose(cl); cl = fopen(files->clusterfile, "a"); } /* write energy*/ if (report <= nsweeps){ // Empty file ef = fopen(files->energyfile, "w"); fclose(ef); ef = fopen(files->energyfile, "a"); fprintf (ef, "# sweep energy\n"); statf = fopen(files->statfile, "w"); fclose(statf); statf = fopen(files->statfile, "a"); fprintf (statf, "# sweep volume\n"); } /*=== Initialise counters etc. ===*/ // double pvolume; /* Volume of all particles*/ /* pvolume =0.0; for (i=0;i < topo->npart;i++) { if (conf->particle[i].type>=0 ) pvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume; }*/ sim->shprob = sim->shave/(double)topo->npart; for (i=0;i<MAXT;i++){ sim->rot[i].acc = 0; sim->rot[i].rej = 0; sim->rot[i].oldrmsd = 0; sim->rot[i].oldmx = 0; sim->trans[i].acc = 0; sim->trans[i].rej = 0; sim->trans[i].oldrmsd = 0; sim->trans[i].oldmx = 0; } for (i=0;i<MAXMT;i++){ sim->chainm[i].acc = 0; sim->chainm[i].rej = 0; sim->chainm[i].oldrmsd = 0; sim->chainm[i].oldmx = 0; sim->chainr[i].acc = 0; sim->chainr[i].rej = 0; sim->chainr[i].oldrmsd = 0; sim->chainr[i].oldmx = 0; } //(*edge).acc = (*edge).rej = (*edge).oldrmsd = (*edge).oldmx = 0; sim->edge.acc = sim->edge.rej = sim->edge.oldrmsd = sim->edge.oldmx = 0; sim->mpiexch.acc = sim->mpiexch.rej = sim->mpiexch.oldrmsd = sim->mpiexch.oldmx = 0; /*Initialize some values at begining*/ partvecinit(topo,sim,conf); next_adjust = adjust; next_calc = paramfrq; next_dump = report; next_frame = sim->movie; //nem = vol = shapex = shapey = shapez = nullstat; //for (i=0; i<MAXF; i++) smec[i] = nullstat; if (sim->movie > 0) { mf = fopen(files->moviefile, "a"); } else { mf = NULL; } sim->wl.wl_meshsize = 0; sim->wl.radiushole = NULL; sim->wl.radiusholeold = NULL; sim->wl.radiusholemax = 0; sim->wl.partincontactold = 0; sim->wl.partincontact = 0; sim->wl.wlmdim = 0; sim->wl.wlmdim = 0; sim->wl.length[0]=0; sim->wl.length[1]=0; sim->wl.currorder[0]=0; sim->wl.currorder[1]=0; sim->wl.neworder[0]=0; sim->wl.neworder[1]=0; sim->wl.weights = NULL; sim->wl.hist = NULL; masscenter(topo->npart,topo->ia_params, conf); /* Initialization of wang-landaou method*/ if ( sim->wlm[0] >0 ) { if (wlinit(&sim->wl,files->wlinfile) != 0) return; sim->wl.wlmdim = 1 ; if ( sim->wlm[1] > 0 ) sim->wl.wlmdim = 2 ; for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: masscenter(topo->npart,topo->ia_params, conf); sim->wl.currorder[wli] = z_order(&sim->wl,conf,wli); break; case 2: sim->wl.wl_meshsize = (topo->ia_params[sim->wl.wlmtype][sim->wl.wlmtype].sigma) / 3.0; // TODO sim->wl.mesh.data = NULL; sim->wl.mesh.tmp = NULL; sim->wl.origmesh.data = NULL; sim->wl.origmesh.tmp = NULL; sim->wl.currorder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 3: sim->wl.currorder[wli] = (long) floor( (conf->particle[0].dir.z - 
                case 4:
                    sim->wl.currorder[wli] = twopartdist(&sim->wl, conf, wli);
                    break;
                case 5:
                    masscenter(topo->npart, topo->ia_params, conf);
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo, conf, sim, wli, &(conf->syscm));
                    break;
                case 6:
                    sim->wl.radiusholemax = 0;
                    sim->wl.radiushole = NULL;
                    sim->wl.radiusholeold = NULL;
                    sim->wl.currorder[wli] = radiushole_all(topo, conf, sim, wli, &(conf->particle[0].pos));
                    break;
                case 7:
                    sim->wl.currorder[wli] = contparticles_all(topo, conf, sim, wli);
                    break;
                default:
                    sim->wl.currorder[wli] = 0;
                    break;
            }
            if ( (sim->wl.currorder[wli] >= sim->wl.length[wli]) || (sim->wl.currorder[wli] < 0) ) {
                printf("Error: starting Wang-Landau method with order parameter %f out of range (%f - %f)\n\n",
                       sim->wl.dorder[wli]*sim->wl.currorder[wli] + sim->wl.minorder[wli],
                       sim->wl.minorder[wli],
                       sim->wl.minorder[wli] + sim->wl.dorder[wli]*sim->wl.length[wli]);
                wlend(&sim->wl);
                if (memorydealloc(conf, topo, sim))
                    exit(1);
                exit(2);
            }
        }
        if (sim->wl.alpha < WL_ALPHATOL/100)
            sim->wl.alpha = WL_ZERO;
        fflush (stdout);
    }

#if 0
    /* Debug-only pair-energy dump. This block prints the pair energy of
       particle 0 against every other particle (in both argument orders) and
       then calls exit(0); left enabled it would terminate the program before
       the MC loop below ever runs, so it is fenced off here. */
    double e, e2;
    for(int i=0; i < 1/*topo->npart-1*/; ++i) {
        for(int j=i+1; j < topo->npart; ++j) {
            e = paire(i, j, intfce, topo, conf);
            e2 = paire(j, i, intfce, topo, conf);
            if(e < 1000.0)
                printf("%.5lf %.5lf\n", e, e2);
            else
                printf("%lf\n", 1000.0);
        }
        printf("\n");
    }
    exit(0);
#endif

    /*do moves - START OF REAL MC*/
    if(sim->pairlist_update){
        gen_pairlist(topo, sim, conf); // Does that solve the problem?
    }

    /*do energy drift check - start calculation*/
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftstart = calc_energy(0, intfce, 0, topo, conf, sim, 0);
    pvdriftstart = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    //printf("starting energy: %.15f \n", calc_energy(0, intfce, 0, topo, conf, sim, 0));
    //printf("press: %.15f\n", sim->press * volume - (double)topo->npart * log(volume) / sim->temper);
    edriftchanges = 0.0;

    for (sweep=1; sweep <= nsweeps; sweep++) {
        // Try replica exchange
        if((sim->nrepchange) && (sweep % sim->nrepchange == 0)){
            edriftchanges += replicaexchangemove(topo, sim, conf, intfce, sweep);
        }
        // Regenerate the pairlist
        if((sim->pairlist_update) && (sweep % sim->pairlist_update == 0)){
            gen_pairlist(topo, sim, conf);
        }
        // normal moves
        for (step=1; step <= topo->npart; step++) {
            moveprobab = ran2(&seed);
            if ( moveprobab < sim->shprob) {
                /* pressure moves */
                edriftchanges += pressuremove(topo, sim, conf, intfce);
            } else {
                if (moveprobab < sim->shprob + sim->chainprob) {
                    /* chain moves */
                    edriftchanges += chainmove(topo, sim, conf, intfce);
                } else if (moveprobab < sim->shprob + sim->chainprob + sim->switchprob){
                    /*=== This is an attempt to switch a type ===*/
                    edriftchanges += switchtypemove(topo, sim, conf, intfce);
                } else {
                    /* single particle moves */
                    edriftchanges += particlemove(topo, sim, conf, intfce);
                } /* end of else next to chain moves */
            } /* end of else next to volume moves */
        } /**** End of step loop for this sweep ****/

        /*=== Start of end-of-sweep housekeeping ===*/
        /* Adjustment of maximum step sizes during equilibration */
        if (sweep == next_adjust) {
            for (i = 0; i < MAXT; i++) {
                if ((sim->trans[i].acc > 0)||(sim->trans[i].rej > 0))
                    optimizestep (sim->trans + i, 1.5, 0.0);
                if ((sim->rot[i].acc > 0)||(sim->rot[i].rej > 0))
                    optimizerot (sim->rot + i, 5.0, 0.01);
            }
            for (i = 0; i < MAXMT; i++) {
                if ((sim->chainm[i].acc > 0)||(sim->chainm[i].rej > 0))
                    optimizestep (sim->chainm + i, 1.5, 0.0);
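                /* Note (assumption, inferred from usage, not stated in the
                   source): optimizestep()/optimizerot() presumably rescale the
                   maximum trial displacement / rotation angle from the
                   accumulated acc/rej counters so the acceptance ratio stays
                   near a target value; the two numeric arguments look like
                   bounds on the step size. Chain rotations below are tuned
                   with the same scheme. */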
                if ((sim->chainr[i].acc > 0)||(sim->chainr[i].rej > 0))
                    optimizerot (sim->chainr + i, 5.0, 0.01);
            }
            optimizestep (&(sim->edge), 1.0, 0.0);
            next_adjust += adjust;
        }

        if ( (sim->wlm[0] > 0) && (sim->wl.alpha > WL_ZERO) && !(sweep % 1000) ) {
            /* Check flatness of the Wang-Landau histogram; if flat enough,
               halve alpha, reset the histogram, and shift the weights. */
            sim->wl.min = sim->wl.hist[0];
            sim->wl.max = sim->wl.hist[0];
            for (i=0; i < sim->wl.length[0]; i++) {
                j=0;
                if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                for (j=1; j < sim->wl.length[1]; j++) {
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] > sim->wl.max ) sim->wl.max = sim->wl.hist[i+j*sim->wl.length[0]];
                    if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
                }
            }
            if ( sim->wl.min > WL_MINHIST ) {
                if ( sim->temper * log(sim->wl.max/sim->wl.min) < WL_GERR ) {
                    /*DEBUG
                      for (i=1; i<wl.length; i++) {
                          printf (" %15.8le %15ld %15.8f\n", sim->wl.weights[i], sim->wl.hist[i], particle[0].pos.z);
                          fflush(stdout);
                      }
                    */
                    if ( sim->wl.alpha < WL_ALPHATOL) {
                        printf("\nF I N I S H E D\n\n");
                        fflush (stdout);
                        break;
                    }
                    sim->wl.alpha /= 2;
                    printf("%f \n", sim->wl.alpha);
                    fflush (stdout);
                    sim->wl.wmin = sim->wl.weights[0];
                    for (i=0; i < sim->wl.length[0]; i++) {
                        j=0;
                        sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                        sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        for (j=1; j < sim->wl.length[1]; j++) {
                            sim->wl.hist[i+j*sim->wl.length[0]] = 0;
                            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
                        }
                    }
                }
            }
        }

        if (!(sweep % 10000)) {
            /* reinitialize patch vectors to avoid accumulation of errors */
            partvecinit(topo, sim, conf);
        }

        /* Sampling of statistics */
        if (sweep == next_calc) {
            /*s2 = nematic(npart, particle);
              accumulate (&nem, s2);
              for (i=0; i<terms; i++) {
                  ci = smectic(npart, particle, i+1);
                  accumulate (&smec[i], ci);
              }
              accumulate (&shapex, (*box).x);
              accumulate (&shapey, (*box).y);
              accumulate (&shapez, (*box).z);
              volume = (*box).x * (*box).y * (*box).z;
              accumulate (&vol, volume);
              next_calc += paramfrq;
            */
        }

        /* Writing of statistics */
        if (sweep == next_dump) {
            /*printf ("Statistics after %ld sweeps:\n", sweep);
              printf ("   Mean and RMS fluctuation of S2:  %13.8lf %13.8lf\n", nem.mean, nem.rms);
              for (i=0; i<terms; i++) {
                  printf ("   Mean & fluc. Fourier coeff. %3ld: %13.8lf %13.8lf\n", i+1, smec[i].mean, smec[i].rms);
              }
              printf ("   Mean & fluc box dimensions:  x   %13.8lf %13.8lf\n", shapex.mean, shapex.rms);
              printf ("                                y   %13.8lf %13.8lf\n", shapey.mean, shapey.rms);
              printf ("                                z   %13.8lf %13.8lf\n", shapez.mean, shapez.rms);
              printf ("   Mean & fluctuation volume:       %13.8lf %13.8lf\n", vol.mean, vol.rms);
              printf ("   Mean & fluc. volume over volume of particles: %13.8lf %13.8lf\n", vol.mean/pvolume, vol.rms/pvolume);
              printf ("\n");
              fflush (stdout);
            */
            fprintf (statf, " %ld; %.10lf\n", sweep, conf->box.x * conf->box.y * conf->box.z);
            fprintf (ef, " %ld; %.10lf %f \n", sweep, calc_energy(0, intfce, 0, topo, conf, sim, 0), alignment_order(conf, topo));
            if (sim->wlm[0] > 0) {
                wlwrite(&sim->wl, files->wloutfile);
            }
            next_dump += report;
        }

        /* Writing of movie frame */
        if (sweep == next_frame) {
            fprintf (mf, "%ld\n", topo->npart);
            fprintf (mf, "sweep %ld; box %.10lf %.10lf %.10lf\n", sweep, conf->box.x, conf->box.y, conf->box.z);
            draw (mf, conf, topo);
            fflush (mf);
            next_frame += sim->movie;
        }

        /* Writing out cluster statistics */
        if(sim->write_cluster && (sweep % sim->write_cluster == 0)){
            write_cluster(cl_stat, cl, cl_list, FALSE, sweep, sim, topo, conf, intfce);
        }
        /*=== End of housekeeping ===*/
    } /**** End of sweeps loop ****/

    /*do energy drift check - at the end of the calculation*/
    volume = conf->box.x * conf->box.y * conf->box.z;
    edriftend = calc_energy(0, intfce, 0, topo, conf, sim, 0);
    pvdriftend = sim->press * volume - (double)topo->npart * log(volume) / sim->temper;
    printf("Energy drift: %.15lf \n", edriftend - edriftstart - edriftchanges + pvdriftend - pvdriftstart);
    printf("Starting energy+pv: %.8lf \n", edriftstart + pvdriftstart);
    printf("Starting energy: %.8lf \n", edriftstart);
    fflush(stdout);

    /* End Wang-Landau */
    if (sim->wlm[0] > 0) {
        sim->wl.min = sim->wl.hist[0];
        for (i=0; i < sim->wl.length[0]; i++) {
            j=0;
            if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            for (j=1; j < sim->wl.length[1]; j++) {
                if ( sim->wl.hist[i+j*sim->wl.length[0]] < sim->wl.min ) sim->wl.min = sim->wl.hist[i+j*sim->wl.length[0]];
            }
        }
        sim->wl.wmin = sim->wl.weights[0];
        for (i=0; i < sim->wl.length[0]; i++) {
            j=0;
            sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            for (j=1; j < sim->wl.length[1]; j++) {
                sim->wl.weights[i+j*sim->wl.length[0]] -= sim->wl.wmin;
            }
        }
        wlwrite(&sim->wl, files->wloutfile);
        wlend(&sim->wl);
        if ( (sim->wlm[0] == 2)||(sim->wlm[1] == 2) ) {
            mesh_end(&sim->wl.mesh);
            mesh_end(&sim->wl.origmesh);
        }
        if ( (sim->wlm[0] == 5)||(sim->wlm[1] == 5)||(sim->wlm[0] == 6)||(sim->wlm[1] == 6) ) {
            if ( sim->wl.radiushole != NULL ) free(sim->wl.radiushole);
            if ( sim->wl.radiusholeold != NULL ) free(sim->wl.radiusholeold);
        }
    }

    /*end movie*/
    if (sim->movie > 0)
        fclose (mf);

    /*end cluster*/
    if(sim->write_cluster){
        fclose(cl_stat);
        fclose(cl);
    }
    if (report <= nsweeps) {  /* "<=" matches the condition under which ef and statf were opened */
        fclose(ef);
        fclose(statf);
    }
}

/*..................................MOVES.........................................*/
/*................................................................................*/
/*..............................PARTICLE MOVES....................................*/

double particlemove(struct topo * topo, struct sim * sim, struct conf * conf,
                    double (* intfce[MAXT][MAXT])(struct interacts *))
{
    double edriftchanges = 0.0;
    long target;
    double ran2(long *);
    double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf,
                        double (* intfce[MAXT][MAXT])(struct interacts *), long target);
    double partrotate(struct topo * topo, struct sim * sim, struct conf * conf,
                      double (* intfce[MAXT][MAXT])(struct interacts *), long target);

    /*=== This is a particle move step ===*/
    target = ran2(&seed) * topo->npart;
    if ( ((ran2(&seed) < 0.5) || (topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0] >= SP)) ) {
        /* no rotation for spheres */
        //target = 1;
        //printf ("displacement\n\n");
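        /* Note (inferred from usage, not stated in the source): half of the
           attempts are displacements and half rotations, except for spheres
           (geotype >= SP), which have no orientational degree of freedom and
           are always displaced. The acceptance test happens inside
           partdisplace()/partrotate() via movetry(); judging from its use as
           "if (reject || movetry(...))", movetry() returns nonzero when the
           move is to be rejected, presumably implementing the Metropolis rule
           P_acc = min(1, exp(-(E_new - E_old)/T)). */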
edriftchanges = partdisplace(topo,sim,conf,intfce,target); } else { /*=== Rotation step ===*/ edriftchanges = partrotate(topo,sim,conf,intfce,target); } /*=== End particle move step ===*/ return edriftchanges; } /*................................................................................*/ double partdisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target) { double edriftchanges,energy,enermove,wlener; struct vector orig, dr, origsyscm; int reject=0,wli; double radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *, long); void wlaccept(int, struct wls *); long meshorder_moveone(struct vector, struct vector, struct meshs *, long, long, \ struct conf * conf, struct sim * sim, int wli); int mesh_cpy(struct meshs *, struct meshs *); //void mesh_print (struct meshs *); long z_order(struct wls *, struct conf * conf, int wli); long twopartdist(struct wls *, struct conf *conf, int wli); struct vector ranvec(void); int longarray_cpy (long **, long **, long, long); long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target,int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Displacement step ===*/ edriftchanges =0.0; origsyscm.x = 0; origsyscm.y = 0; origsyscm.z = 0; energy = calc_energy(target, intfce, 1, topo, conf, sim,0); orig = conf->particle[target].pos; dr = ranvec(); //ran = sqrt(ran2(&seed)); dr.x *= sim->trans[conf->particle[target].type].mx/conf->box.x; dr.y *= sim->trans[conf->particle[target].type].mx/conf->box.y; dr.z *= sim->trans[conf->particle[target].type].mx/conf->box.z; conf->particle[target].pos.x += dr.x; conf->particle[target].pos.y += dr.y; conf->particle[target].pos.z += dr.z; //} while (conf->particle[target].pos.x < 0.25 || conf->particle[target].pos.x > 0.50); reject = 0; wlener = 0.0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: origsyscm = conf->syscm; conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.z += dr.z * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_moveone(orig, conf->particle[target].pos, &sim->wl.mesh, topo->npart, target, conf, sim,wli); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; origsyscm = conf->syscm; conf->syscm.x += dr.x * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.y += dr.y * topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; conf->syscm.z += dr.z * 
topo->ia_params[conf->particle[target].type][conf->particle[target].type].volume / conf->sysvolume; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_moveone(&orig, conf, sim,target,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_moveone(&orig,conf,sim,target,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = calc_energy(target, intfce, 1, topo, conf, sim,0); } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ conf->particle[target].pos = orig; sim->trans[conf->particle[target].type].rej++; if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) conf->syscm = origsyscm; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->trans[conf->particle[target].type].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; //printf("%lf\t%lf\n", conf->particle[0].pos.z * conf->box.z , enermove); //printf("%.12f\t%.12f\t%.12f\n", energy , enermove,edriftchanges); } return edriftchanges; } /*................................................................................*/ double partrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *),long target) { double edriftchanges,energy,enermove,wlener; struct particles origpart; int reject=0,wli; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); void normalise(struct vector *); void ortogonalise(struct vector *,struct vector); void psc_rotate(struct particles *,double,int); /*=== Rotation step ===*/ //printf ("rotation %ld npart %ld\n\n",target,npart); energy = calc_energy(target, intfce, 1, topo, conf, sim,0); origpart = conf->particle[target]; psc_rotate(&conf->particle[target],sim->rot[conf->particle[target].type].angle, topo->ia_params[conf->particle[target].type][conf->particle[target].type].geotype[0]); /*should be normalised and ortogonal but we do for safety*/ normalise (&conf->particle[target].dir); ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir); reject = 0; edriftchanges =0.0; wlener = 0.0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 3: if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ 
sim->wl.dorder[wli] ); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* only rotation change direction */ break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = calc_energy(target, intfce, 1, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->particle[target] = origpart; sim->rot[conf->particle[target].type].rej++; wlreject(sim,sim->wl.radiusholemax); } else { /* move was accepted */ // DEBUG //fprintf(fenergy, "%lf\t%lf\n", conf->particle[1].pos.x * conf->box.x , enermove); sim->rot[conf->particle[target].type].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; //printf("%lf\t%lf\n", conf->particle[0].patchdir[0].z, enermove); } return edriftchanges; } /*..................... This is an attempt to switch a type.................................*/ double switchtypemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *) ) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; long target; double radiusholemax_orig=0; double ran2(long *); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); void int_partvec(long, struct ia_param *, struct conf *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_cpy(struct meshs *, struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); int longarray_cpy (long **target, long **source,long,long); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== This is an attempt to switch a type ===*/ edriftchanges =0.0; wlener = 0.0; target = ran2(&seed) * topo->n_switch_part; target = topo->switchlist[target]; DEBUG_SIM("Switching the particle type"); DEBUG_SIM("PARTICLE: %ld", target); energy = calc_energy(target, intfce, 1, topo, conf, sim,0); // Start switching the type int switched = conf->particle[target].switched; int pmone = PMONE(switched); DEBUG_SIM("switched = %d", switched); DEBUG_SIM("pmone = %d", pmone); int tmp_type = conf->particle[target].type; conf->particle[target].type = conf->particle[target].switchtype; conf->particle[target].switchtype = tmp_type; conf->particle[target].switched += pmone; int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf); DEBUG_SIM("Particle %ld is %d switched", target, switched); //DEBUG #ifdef DEBUGGING_SIM if ((abs(pmone) != 1) || (conf->particle[target].type == conf->particle[target].switchtype)){ fprintf(stderr, "ERROR: Something went wrong, when switching the type of particle %ld\n", target); exit(1); } #endif if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { /*case 1: sim->wl.neworder = 
z_order(&sim->wl, conf,wli); break;*/ case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]); break; /*case 4: sim->wl.neworder = twopartdist(&sim->wl,conf,wli); break;*/ case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { enermove = conf->particle[target].delta_mu * pmone; // DEBUG //double dmu = enermove; //particle[target].switched += pmone; enermove += calc_energy( target, intfce, 1, topo, conf, sim,0); //printf("energy: %lf \t %lf\t%lf\n",particle[target].delta_mu, dmu, enermove); } // If not accepted: switch back if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ DEBUG_SIM("Did NOT switch it\n"); conf->particle[target].switchtype = conf->particle[target].type; conf->particle[target].type = tmp_type; conf->particle[target].switched -= pmone; int_partvec(target,&(topo->ia_params[conf->particle[target].type][conf->particle[target].type]),conf); wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*.................................CHAIN MOVES....................................*/ /*................................................................................*/ double chainmove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) { double edriftchanges =0.0; long target; double ran2(long *); double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target); double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target); /*=== This is a chain move step ===*/ target = ran2(&seed) * topo->chainnum; if (ran2(&seed) < 0.5) { /*=== Displacement step of cluster/chain ===*/ edriftchanges = chaindisplace(topo,sim,conf,intfce,target); } else { /*=== Rotation step of cluster/chain ===*/ edriftchanges = chainrotate(topo,sim,conf,intfce,target); } /* ==== END OF CHAIN MOVES ===== */ return edriftchanges; } /*................................................................................*/ double chaindisplace(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target) { double edriftchanges,energy,enermove,wlener; struct vector dr, origsyscm; int reject=0,wli; struct vector cluscm; long current,i; struct particles chorig[MAXCHL]; double 
radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \ struct sim * sim, struct particles chorig[MAXCHL],int); int mesh_cpy(struct meshs *, struct meshs *); //void mesh_print (struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); struct vector ranvec(void); int longarray_cpy (long **target, long **source,long,long); long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, \ struct sim * sim,struct particles chorig[MAXCHL],int,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Displacement step of cluster/chain ===*/ //printf ("move chain\n\n"); energy =0.0; wlener = 0.0; edriftchanges=0.0; i=0; current = topo->chainlist[target][0]; cluscm.x = 0; cluscm.y = 0; cluscm.z = 0; origsyscm.x = 0; origsyscm.y = 0; origsyscm.z = 0; while (current >=0 ) { /* store old configuration calculate energy*/ chorig[i].pos = conf->particle[current].pos; energy += calc_energy(current, intfce, 2, topo, conf, sim, target); i++; current = topo->chainlist[target][i]; } dr = ranvec(); dr.x *= sim->chainm[conf->particle[target].chaint].mx/conf->box.x; dr.y *= sim->chainm[conf->particle[target].chaint].mx/conf->box.y; dr.z *= sim->chainm[conf->particle[target].chaint].mx/conf->box.z; i=0; current = topo->chainlist[target][0]; while (current >=0 ) { /* move chaine to new position */ if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) { /* calculate move of center of mass */ cluscm.x += dr.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y += dr.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z += dr.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; } conf->particle[current].pos.x += dr.x; conf->particle[current].pos.y += dr.y; conf->particle[current].pos.z += dr.z; i++; current = topo->chainlist[target][i]; } enermove = 0.0; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: origsyscm = conf->syscm; conf->syscm.x += cluscm.x / conf->sysvolume; conf->syscm.y += cluscm.y / conf->sysvolume; conf->syscm.z += cluscm.z / conf->sysvolume; sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; origsyscm = conf->syscm; conf->syscm.x += cluscm.x / conf->sysvolume; conf->syscm.y += cluscm.y / conf->sysvolume; conf->syscm.z += cluscm.z / conf->sysvolume; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = 
radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { enermove += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { conf->particle[current].pos = chorig[i].pos; i++; current = topo->chainlist[target][i]; } sim->chainm[conf->particle[target].chaint].rej++; if ( (sim->wlm[0] == 1) || (sim->wlm[0] == 5) || (sim->wlm[1] == 1) || (sim->wlm[1] == 5) ) conf->syscm = origsyscm; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->chainm[conf->particle[target].chaint].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*................................................................................*/ double chainrotate(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long target) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; struct vector cluscm; double chainvolume; long current, i; struct particles chorig[MAXCHL]; double radiusholemax_orig=0; double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); long meshorder_movechain(long [MAXN], struct meshs *, long, struct conf * conf, \ struct sim * sim, struct particles chorig[MAXCHL],int); int mesh_cpy(struct meshs *, struct meshs *); void cluster_rotate(long, struct vector, double, struct topo * topo, struct conf * conf); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); int longarray_cpy (long **target, long **source,long,long); long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,\ struct particles chorig[MAXCHL],int,struct vector *); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim,struct particles chorig[MAXCHL],int wli); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== Rotation step of cluster/chain 
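   (inferred from the code below, not stated in the source: the chain is
   rotated as a rigid body about its volume-weighted center, accumulated
   from the per-particle volumes in ia_params, and accepted with the same
   Metropolis test as the single-particle moves)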
===*/ //printf ("rotation of chain\n\n"); energy=0.0; /* set values to zero*/ edriftchanges=0.0; wlener = 0.0; current = topo->chainlist[target][0]; cluscm.x = conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y = conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z = conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; chorig[0] = conf->particle[current]; chainvolume = topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; energy += calc_energy(current, intfce, 2, topo, conf, sim,target); i=1; current = topo->chainlist[target][i]; while (current >=0 ) { /* store old configuration calculate energy*/ chorig[i] = conf->particle[current]; /*We have chains whole! don't have to do PBC*/ /*r_cm.x = conf->particle[current].pos.x - conf->particle[first].pos.x; r_cm.y = conf->particle[current].pos.y - conf->particle[first].pos.y; r_cm.z = conf->particle[current].pos.z - conf->particle[first].pos.z; if ( r_cm.x < 0 ) r_cm.x -= (double)( (long)(r_cm.x-0.5) ); else r_cm.x -= (double)( (long)(r_cm.x+0.5) ); if ( r_cm.y < 0 ) r_cm.y -= (double)( (long)(r_cm.y-0.5) ); else r_cm.y -= (double)( (long)(r_cm.y+0.5) ); if ( r_cm.z < 0 ) r_cm.z -= (double)( (long)(r_cm.z-0.5) ); else r_cm.z -= (double)( (long)(r_cm.z+0.5) ); */ cluscm.x += conf->particle[current].pos.x*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.y += conf->particle[current].pos.y*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; cluscm.z += conf->particle[current].pos.z*topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; chainvolume += topo->ia_params[conf->particle[current].type][conf->particle[current].type].volume; energy += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } cluscm.x = cluscm.x/chainvolume; cluscm.y = cluscm.y/chainvolume; cluscm.z = cluscm.z/chainvolume; /*do actual rotations around geometrical center*/ cluster_rotate(target, cluscm, sim->chainr[conf->particle[target].chaint].angle, topo, conf); enermove=0.0; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: if (target == 0) sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* if we rotated cluster it is around its CM so no change*/ break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = meshorder_movechain(topo->chainlist[target], &sim->wl.mesh, topo->npart, conf, sim, chorig,wli); break; case 3: if (target == 0) sim->wl.neworder[wli] = (long) floor( (conf->particle[0].dir.z - sim->wl.minorder[wli])/ sim->wl.dorder[wli] ); else sim->wl.neworder[wli] = sim->wl.currorder[wli]; /* only rotation change direction */ break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); if ( target == 0 ) 
sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); else sim->wl.neworder[wli] = radiusholeorder_movechain(topo->chainlist[target], conf, sim, chorig,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; if ( target == 0 ) sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); else sim->wl.neworder[wli] = contparticles_movechain(topo->chainlist[target],conf,sim,chorig,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener += sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { enermove += calc_energy(current, intfce, 2, topo, conf, sim,target); i++; current = topo->chainlist[target][i]; } } if ( reject || movetry(energy, enermove, sim->temper) ) { /* probability acceptance */ i=0; current = topo->chainlist[target][0]; while (current >=0 ) { conf->particle[current] = chorig[i]; i++; current = topo->chainlist[target][i]; } sim->chainr[conf->particle[target].chaint].rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->chainr[conf->particle[target].chaint].acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } return edriftchanges; } /*..............................PRESSURE MOVES....................................*/ /*................................................................................*/ double pressuremove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) { double edriftchanges,energy,enermove,wlener; int reject=0,wli; double old_side; /* Box length before attempted change */ double *side; /* Box dimension to try changing */ double psch; /* Size of a box change during pressure */ double pvol; /* Size of a volume during pressure */ double pvoln; /* Size of a new volume during pressure */ double rsave; /* Saved random number */ double area; double radiusholemax_orig=0; double ran2(long *); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); int movetry(double, double, double); void wlreject(struct sim *,long); void wlaccept(int, struct wls *); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); int mesh_cpy(struct meshs *, struct meshs *); long z_order(struct wls *, struct conf * conf,int); long twopartdist(struct wls *, struct conf *conf,int); long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim,int, struct vector *); int longarray_cpy (long **target, long **source,long,long); long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim,int wli); /*=== This is a volume change step ===*/ /*calculate energy*/ edriftchanges=0.0; wlener = 0.0; energy = calc_energy(0, intfce, 0, topo, conf, sim,0); /* Choose an edge */ switch (sim->ptype) { case 0: /* Anisotropic pressure coupling */ rsave = ran2(&seed); if (rsave < 1.0/3.0) { side = &(conf->box.x); area = conf->box.y * conf->box.z; } else if (rsave < 2.0/3.0) { side = &(conf->box.y); area = conf->box.x * conf->box.z; } else { side = &(conf->box.z); area = conf->box.x * 
conf->box.y; } old_side = *side; *side += sim->edge.mx * (ran2(&seed) - 0.5); reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = sim->press * area * (*side - old_side) - (double)topo->npart * log(*side/old_side) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || *side <= 0.0 || ( movetry(energy,enermove,sim->temper) ) ) { /* probability acceptance */ *side = old_side; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 1: /* Isotropic pressure coupling */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y * conf->box.z; conf->box.x += psch; conf->box.y += psch; conf->box.z += psch; pvoln = conf->box.x * conf->box.y * conf->box.z; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl,conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { 
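                /* Wang-Landau bias (same pattern as in the other move
                   routines): the difference between the weights of the
                   proposed and current order-parameter bins is added to the
                   trial energy, so the Metropolis test effectively samples
                   exp(-(E + w)/T) and drives the order-parameter histogram
                   toward flatness as the weights converge. */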
wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calcualte energy */ enermove = sim->press * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; conf->box.z -= psch; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 2: /* Isotropic pressure coupling in xy, z constant */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y; conf->box.x += psch; conf->box.y += psch; pvoln = conf->box.x * conf->box.y; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { /*no change in case 1, it does not change box.z*/ case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = sim->press * conf->box.z * (pvoln - pvol) - (double)topo->npart * log(pvoln/pvol) / sim->temper; enermove += calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; case 3: /* Isotropic pressure coupling in xy, z coupled to have fixed volume */ psch = sim->edge.mx * (ran2(&seed) - 0.5); pvol = conf->box.x * conf->box.y * conf->box.z; conf->box.x += psch; conf->box.y += psch; conf->box.z = pvol / conf->box.x / conf->box.y; reject = 0; if (sim->wlm[0] > 0) { /* get new neworder for wang-landau */ for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 1: sim->wl.neworder[wli] = z_order(&sim->wl, conf,wli); break; case 2: mesh_cpy(&sim->wl.origmesh,&sim->wl.mesh); sim->wl.neworder[wli] = (long) (mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize,topo->npart, conf, sim) - 
sim->wl.minorder[wli]); break; case 4: sim->wl.neworder[wli] = twopartdist(&sim->wl,conf,wli); break; case 5: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->syscm)); break; case 6: radiusholemax_orig = sim->wl.radiusholemax; longarray_cpy(&sim->wl.radiusholeold,&sim->wl.radiushole,sim->wl.radiusholemax,sim->wl.radiusholemax); sim->wl.neworder[wli] = radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); break; case 7: sim->wl.partincontactold = sim->wl.partincontact; sim->wl.neworder[wli] = contparticles_all(topo,conf,sim,wli); break; default: sim->wl.neworder[wli] = sim->wl.currorder[wli]; break; } if ( (sim->wl.neworder[wli] < 0) || (sim->wl.neworder[wli] >= sim->wl.length[wli]) ) reject = 1; } if (!reject) { wlener = sim->wl.weights[sim->wl.neworder[0]+sim->wl.neworder[1]*sim->wl.length[0]] - sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]; energy += wlener; } } if (!reject) { /* wang-landaou ok, try move - calculate energy */ enermove = calc_energy(0, intfce, 0, topo, conf, sim,0); } if ( reject || movetry(energy,enermove,sim->temper) ) { /* probability acceptance */ conf->box.x -= psch; conf->box.y -= psch; conf->box.z = pvol / conf->box.x / conf->box.y; sim->edge.rej++; wlreject(sim,radiusholemax_orig); } else { /* move was accepted */ sim->edge.acc++; wlaccept(sim->wlm[0],&sim->wl); edriftchanges = enermove - energy + wlener; } break; default: fprintf (stderr, "ERROR: unknown type of pressure coupling %d",sim->ptype); exit(1); } /*=== End volume change step ===*/ return edriftchanges; } /*..................... Switch replicas move in MPI ..............................*/ /*.................................................................................*/ double replicaexchangemove(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *), long sweep ) { double edriftchanges=0.0; #ifdef MPI double change, *recwlweights; MPI_Status status; int oddoreven,count,wli,sizewl = 0; struct mpiexchangedata localmpi,receivedmpi; BOOL reject; long localwl,receivedwl; double ran2(long *); void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf); int longarray_cpy (long **target, long **source,long,long); int mesh_init(struct meshs *, double, long, struct conf * conf, struct sim * sim); double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *), int mode, struct topo * topo, struct conf * conf, struct sim * sim,int chainn); void wlaccept(int, struct wls *); //int mpi_newdatatypes(); //mpi_newdatatypes(); int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( 
&(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); if (sim->wl.length[1] > 0) { sizewl = sim->wl.length[1] * sim->wl.length[0]; } else { sizewl = sim->wl.length[0]; } MPI_Datatype MPI_exchange; MPI_Datatype type3[7] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_vector, MPI_LONG, MPI_LONG}; int blocklen3[7] = {1, 1, 1, 1, 1, 1, 2}; MPI_Aint disp3[7]; MPI_Address( &exch, &dispstart); MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.accepted), &disp3[3]); MPI_Address( &(exch.syscm), &disp3[4]); MPI_Address( &(exch.radiusholemax), &disp3[5]); MPI_Address( &(exch.wl_order), &disp3[6]); for (i=0; i <7; i++) disp3[i] -= dispstart; MPI_Type_struct(7, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); /*=== This is an attempt to switch replicas ===*/ localmpi.box = conf->box; localmpi.energy = calc_energy(0, intfce, 0, topo, conf, sim,0); localmpi.volume = conf->box.x * conf->box.y * conf->box.z; localmpi.accepted = 0; localmpi.syscm = conf->syscm; localmpi.radiusholemax = sim->wl.radiusholemax; recwlweights = malloc( sizeof(double) * sizewl ); for (wli=0;wli<2;wli++) { localmpi.wl_order[wli] = 0; receivedmpi.wl_order[wli] = 0; } for (wli=0;wli<sim->wl.wlmdim;wli++) { localmpi.wl_order[wli] = sim->wl.currorder[wli]; //fprintf(stdout,"wli %d %ld %ld\n\n", wli, localmpi.wl_order[wli], sim->wl.currorder[wli] ); } if ( (sweep % (2*sim->nrepchange)) == 0) /* exchange odd ones with even ones*/ oddoreven=1; else /* exchange even ones with odd ones*/ oddoreven=0; if (sim->mpinprocs == 2) oddoreven=1; count = 1; if (sim->mpirank % 2 == oddoreven) { if (sim->mpirank > 0) { MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank-1, count, MPI_COMM_WORLD); MPI_Send(sim->wl.weights, sizewl, MPI_DOUBLE, sim->mpirank-1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*decision of accepting or rejecting the exchange was done on other process here we took received configuration (if move was accepted))*/ //printf("received data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume,receivedmpi.pressure); if (receivedmpi.accepted == 1) { sim->mpiexch.acc++; struct particles *temppart; temppart = malloc(topo->npart*sizeof(struct particles)); MPI_Recv(temppart, topo->npart, MPI_Particle, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("received data: rank: %d\n", sim->mpirank); printf("part0 x %f y %f z %f\n",temppart[0].pos.x, temppart[0].pos.y, temppart[0].pos.z); printf("part1 x %f y %f z %f\n",temppart[1].pos.x, temppart[1].pos.y, temppart[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",temppart[0].chaint,temppart[0].chainn,temppart[0].type); */ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank-1, count, MPI_COMM_WORLD); /* 
printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ localmpi.accepted = receivedmpi.accepted; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; memcpy(conf->particle,temppart,topo->npart*sizeof(struct particles)); edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; if ( sim->wlm[0] >0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); //exchange wl data mesh size and radius hole s for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send because of different sizes we would have to send sizes first and realocate corrrect mesh size and then send data it is better to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: //radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Recv(&(sim->wl.partincontactold),1, MPI_LONG, sim->mpirank-1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank-1, count, MPI_COMM_WORLD); sim->wl.partincontact=sim->wl.partincontactold; break; } } } free(temppart); } else { sim->mpiexch.rej++; if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } else { if (sim->mpirank+1 < sim->mpinprocs) { /*there is above process*/ MPI_Recv(&receivedmpi, 1, MPI_exchange, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); MPI_Recv(recwlweights, sizewl, MPI_DOUBLE, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); /*we got new configuration*/ //printf("received data: rank: %d energy: %f volume: %f \n",sim->mpirank,receivedmpi.energy,receivedmpi.volume); /*evaluate if accepte or reject the configuration*/ /*acc = exp( (1/sim->temper - 1/(sim->temper + sim.dtemp)) * (E_here - E_received) + (sim->press /sim->temper - pressure_received 
/(sim.temper + sim->dtemp)) * (V_here - V_received) if pressure the same it it simplier*/ reject = FALSE; change = (1/sim->temper - 1/(sim->temper + sim->dtemp)) * (localmpi.energy - receivedmpi.energy); //printf("acceptance decision: change: %f localE: %f receivedE: %f tempf: %f \n",change,localmpi.energy,receivedmpi.energy,(1/sim->temper - 1/(sim->temper + sim->dtemp))); change += (sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp)) * (localmpi.volume - receivedmpi.volume); //printf("pressf: %f \n",(sim->press/sim->temper - (sim->press + sim->dpress)/(sim->temper + sim->dtemp))); if (sim->wlm[0] > 0) { localwl = sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]; receivedwl = receivedmpi.wl_order[0] + receivedmpi.wl_order[1]*sim->wl.length[0]; //fprintf(stdout,"decide wl %ld %ld %ld energychange: %f \n", receivedmpi.wl_order[0], receivedmpi.wl_order[1], receivedwl, change ); //fprintf(stdout,"local weights %ld %f %ld %f \n",localwl,sim->wl.weights[localwl],receivedwl,sim->wl.weights[receivedwl]); change += (-sim->wl.weights[localwl] + sim->wl.weights[receivedwl] )/sim->temper + ( -recwlweights[receivedwl] + recwlweights[localwl])/(sim->temper + sim->dtemp) ; //fprintf(stdout,"wlchange %f \n\n",change); } if ( (!(reject)) && ( (change > 0) || (ran2(&seed) < exp(change)) ) ) { /* Exchange ACCEPTED send local stuff*/ //printf("exchange accepted \n"); sim->mpiexch.acc++; localmpi.accepted = 1; conf->box = receivedmpi.box; conf->syscm = receivedmpi.syscm; edriftchanges = receivedmpi.energy - localmpi.energy; edriftchanges += sim->press * (receivedmpi.volume - localmpi.volume) - (double)topo->npart * log(receivedmpi.volume / localmpi.volume) / sim->temper; //printf("edrift %f\n",edriftchanges); if ( sim->wlm[0] > 0 ) { for (wli=0;wli<sim->wl.wlmdim;wli++) { sim->wl.neworder[wli] = receivedmpi.wl_order[wli]; } wlaccept(sim->wlm[0],&sim->wl); } MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD); //printf("send data: rank: %d energy: %f volume: %f pressure: %f \n",sim->mpirank,localmpi.energy,localmpi.volume,localmpi.pressure); /*send and receive configuration*/ MPI_Send(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, count, MPI_COMM_WORLD); /* printf("send data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ MPI_Recv(conf->particle, topo->npart, MPI_Particle, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD,&status); /* printf("recieved data: rank: %d\n",sim->mpirank); printf("part0 x %f y %f z %f\n",conf->particle[0].pos.x,conf->particle[0].pos.y,conf->particle[0].pos.z); printf("part1 x %f y %f z %f\n",conf->particle[1].pos.x,conf->particle[1].pos.y,conf->particle[1].pos.z); printf("part0 chaint %ld chainn %ld type %d\n",conf->particle[0].chaint,conf->particle[0].chainn,conf->particle[0].type); */ if ( sim->wlm[0] > 0 ) { //exchange wl data mesh size and radius hole s for (wli=0;wli<sim->wl.wlmdim;wli++) { switch (sim->wlm[wli]) { case 2: /*it is complicated to send because of different sizes we would have to send sizes first and realocate corrrect mesh size and then send data it is better to recalculate (a bit slower though)*/ mesh_init(&sim->wl.mesh,sim->wl.wl_meshsize, topo->npart, conf, sim); break; case 5: 
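                        /* Exchange the Wang-Landau radius-hole histogram with
                           the partner replica. The arrays may differ in length,
                           so the partner's radiusholemax (carried in the
                           exchange struct) is used to realloc the receive
                           buffer before copying. */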
//radiushole_all(topo,conf,sim,wli,&(conf->syscm)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 6: //radiushole_all(topo,conf,sim,wli,&(conf->particle[0].pos)); sim->wl.radiusholeold = (long*) realloc(sim->wl.radiusholeold,sizeof(long)*receivedmpi.radiusholemax); MPI_Send(sim->wl.radiushole,sim->wl.radiusholemax, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(sim->wl.radiusholeold,receivedmpi.radiusholemax, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); longarray_cpy(&sim->wl.radiushole,&sim->wl.radiusholeold,sim->wl.radiusholemax,receivedmpi.radiusholemax); sim->wl.radiusholemax=receivedmpi.radiusholemax; break; case 7: //contparticles_all(topo,conf,sim,wli); MPI_Send(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, count, MPI_COMM_WORLD); MPI_Recv(&(sim->wl.partincontact),1, MPI_LONG, sim->mpirank+1, MPI_ANY_TAG, MPI_COMM_WORLD, &status); break; } } } } else { /*if exchange rejected send back info */ //printf("exchange rejected\n"); sim->mpiexch.rej++; MPI_Send(&localmpi, 1, MPI_exchange, sim->mpirank+1, count, MPI_COMM_WORLD); if ( sim->wlm[0] > 0 ) { sim->wl.weights[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha; sim->wl.hist[sim->wl.currorder[0]+sim->wl.currorder[1]*sim->wl.length[0]]++; } } } } if ( (localmpi.accepted) && (sim->pairlist_update) ) gen_pairlist(topo, sim, conf); MPI_Type_free(&MPI_exchange); MPI_Type_free(&MPI_Particle); MPI_Type_free(&MPI_vector); free(recwlweights); #endif return edriftchanges; } /*int mpi_newdatatypes() { int i; struct vector vec; struct particles part; struct mpiexchangedata exch; MPI_Aint dispstart; MPI_Datatype MPI_vector; MPI_Datatype type[3] = {MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE}; int blocklen[3] = {1, 1, 1}; MPI_Aint disp[3]; MPI_Address( &vec, &dispstart); MPI_Address( &(vec.x), &disp[0]); MPI_Address( &(vec.y), &disp[1]); MPI_Address( &(vec.z), &disp[2]); for (i=0; i <3; i++) disp[i] -= dispstart; MPI_Type_struct( 3, blocklen, disp, type, &MPI_vector); MPI_Type_commit( &MPI_vector); MPI_Datatype MPI_Particle; MPI_Datatype type2[11] = {MPI_vector,MPI_vector,MPI_vector,MPI_vector,MPI_vector, MPI_LONG, MPI_LONG, MPI_INT,MPI_INT,MPI_DOUBLE, MPI_INT}; int blocklen2[11] = {1, 1, 2,4,2,1,1,1,1,1,1,}; MPI_Aint disp2[11]; MPI_Address( &part, &dispstart); MPI_Address( &(part.pos), &disp2[0]); MPI_Address( &(part.dir), &disp2[1]); MPI_Address( &(part.patchdir), &disp2[2]); MPI_Address( &(part.patchsides), &disp2[3]); MPI_Address( &(part.chdir), &disp2[4]); MPI_Address( &(part.chaint), &disp2[5]); MPI_Address( &(part.chainn), &disp2[6]); MPI_Address( &(part.type), &disp2[7]); MPI_Address( &(part.switchtype), &disp2[8]); MPI_Address( &(part.delta_mu), &disp2[9]); MPI_Address( &(part.switched), &disp2[10]); for (i=0; i <11; i++) disp2[i] -= dispstart; MPI_Type_struct( 11, blocklen2, disp2, type2, &MPI_Particle); MPI_Type_commit( &MPI_Particle); MPI_Datatype MPI_exchange; MPI_Datatype type3[5] = {MPI_vector, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_INT}; int blocklen3[5] = {1, 1, 1, 1, 1}; MPI_Aint disp3[5]; MPI_Address( &exch, &dispstart); 
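    [note added: MPI_Address and MPI_Type_struct used in this retired helper
     are the deprecated MPI-1 names, removed in MPI-3; a revived version would
     presumably use the equivalent modern calls with the same block-length and
     displacement arrays, e.g.
         MPI_Get_address( &vec, &dispstart);
         MPI_Get_address( &(vec.x), &disp[0]);
         MPI_Get_address( &(vec.y), &disp[1]);
         MPI_Get_address( &(vec.z), &disp[2]);
         MPI_Type_create_struct( 3, blocklen, disp, type, &MPI_vector);
         MPI_Type_commit( &MPI_vector);
     ]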
MPI_Address( &(exch.box), &disp3[0]); MPI_Address( &(exch.energy), &disp3[1]); MPI_Address( &(exch.volume), &disp3[2]); MPI_Address( &(exch.pressure), &disp3[3]); MPI_Address( &(exch.accepted), &disp3[4]); for (i=0; i <5; i++) disp3[i] -= dispstart; MPI_Type_struct( 5, blocklen3, disp3, type3, &MPI_exchange); MPI_Type_commit( &MPI_exchange); return 0; }*/ /*................................................................................*/ /*................................................................................*/ /*....................END OF MOVES, INTERACTION FUNCTIONS FOLLOW..................*/ /*................................................................................*/ /*..............................................................................*/ /* Determines total energy of two spherocylinders type PSC PSC */ double e_psc_psc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_psc(struct interacts *,int,int); closestdist(interact); repenergy = erepulsive(interact); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) ) secondCH = TRUE; if (firstCH) interact->part1->dir = interact->part1->chdir[0]; if (secondCH) interact->part2->dir = interact->part2->chdir[0]; if ( (firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_psc_psc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) secondT = TRUE; if (firstT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); } atrenergy += eattractive_psc_psc(interact,1,0); } if ( (firstT) && (secondT) ) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_psc_psc(interact,1,1); } if (secondT) { if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = olddir2; closestdist(interact); } if (!firstCH && secondCH) { interact->part1->dir = olddir1; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } 
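            /* NOTE: eattractive_psc_psc(interact, i, j) evaluates patch i of
               particle 1 against patch j of particle 2; the calls with (0,0),
               (1,0), (1,1) and (0,1) enumerate every patch pair that exists once
               one or both particles carry a second patch (the T* geotypes), with
               dir temporarily replaced by the matching chiral axis chdir[] for
               CH* geotypes before each closestdist() evaluation. */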
atrenergy += eattractive_psc_psc(interact,0,1);
            }
        }
        if (firstCH)
            interact->part1->dir = olddir1;
        if (secondCH)
            interact->part2->dir = olddir2;
    }
    return repenergy+atrenergy;
}

/* Determines attractive energy of two spherocylinders type PSC PSC */
double eattractive_psc_psc(struct interacts * interact,int patchnum1,int patchnum2)
{
    int i, intrs;
    double rcut, atrenergy, ndist;
    double v1, v2, f0, f1, f2, T1, T2, S1, S2, a;
    double intersections[5];
    struct vector vec1, vec2, vec_intrs, vec_mindist;
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_sum(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector *, struct vector*);
    struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
    void normalise(struct vector *);
    int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
    double fanglscale(double, struct ia_param *, int which);

    rcut = interact->param->rcut;
    //interact->halfl = interact->param->half_len[0];
    //DEBUG_SIM("halfl = %lf", interact->halfl);
    for(i=0;i<5;i++)
        intersections[i]=0;
    //cospatch = param.pcanglsw;
    //cospatchinr = param.pcangl;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    //DEBUG_SIM("first intersection");
    intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    if (intrs <2){
        //DEBUG_SIM("No intersection :(");
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    }
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same the opposite way: psc1 in patch of psc2*/
    for(i=0;i<5;i++)
        intersections[i]=0;
    //DEBUG_SIM("get vector");
    vec1=vec_scale(interact->r_cm,-1.0);
    //DEBUG_SIM("second intersection");
    intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersections*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with the two intersection pieces calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    //fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance and attractive energy from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    //fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    if (ndist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    //atrenergy = -1.0;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
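    /* NOTE: the angular factor below is a = r_perp . patchdir, where r_perp is
       the inter-segment vector projected perpendicular to the cylinder axis and
       normalised; fanglscale() then applies a linear switch on that cosine:
       f = 0 for a <= pcanglsw, f = 1 for a >= pcangl, and in between
       f = 0.5 - ((pcanglsw + pcangl)*0.5 - a)/(pcangl - pcanglsw),
       with pcangl/pcanglsw holding the cosines of the patch half-angle without
       and with the switching margin (see fanglscale() below). */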
//vec1=vec_scale(vec_mindist,-1.0); vec1=vec_perpproject(&vec1, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); f1 = fanglscale(a,interact->param, 0+2*patchnum1); /*6- scaling function3: angular dependence of patch2*/ vec1=vec_scale(vec_intrs,-1.0); //vec1=vec_scale(vec_mindist,1.0); vec1=vec_perpproject(&vec1, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum2]); f2 = fanglscale(a,interact->param, 1+2*patchnum2); //printf("v1: %f v2: %f f0: %f f1: %f f2: %f ener: %f\n",v1,v2,f0,f1,f2,atrenergy); /*7- put it all together*/ atrenergy *=f0*f1*f2; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); // fprintf (stderr, "attraction %.8f \n",atrenergy); // exit(1); return atrenergy; } /* a = r_ij * n_i */ double fanglscale(double a, struct ia_param * param, int which) { double f; // TODO for different types if (a <= param->pcanglsw[which]) f=0.0; else { if (a >= param->pcangl[which]) f=1.0; else { f = 0.5 - ((param->pcanglsw[which] + param->pcangl[which])*0.5 - a )/(param->pcangl[which] - param->pcanglsw[which]); } } return f; } /*CPSC..............................................................................*/ /* Determines total energy of two spherocylinders of type 3 -cylindrical psc -CPSC */ double e_cpsc_cpsc(struct interacts * interact) { double atrenergy, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_cpsc_cpsc(struct interacts *,int,int); //DEBUG_SIM("do energy 33") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. energy"); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHCPSC)||(interact->param->geotype[0] == TCHCPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHCPSC)||(interact->param->geotype[1] == TCHCPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } atrenergy = eattractive_cpsc_cpsc(interact,0,0); /*addition of interaction of second patches*/ if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,1,0); } if ( (firstT) && (secondT) ) { if (secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,1,1); } if (secondT) { if (firstT && firstCH ) { interact->part1->dir = interact->part1->chdir[0]; closestdist(interact); } atrenergy += eattractive_cpsc_cpsc(interact,0,1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of two spherocylinders of type 3 -cylindrical psc -CPSC */ double eattractive_cpsc_cpsc(struct interacts * 
interact, int patchnum1, int patchnum2)
{
    int i, intrs;
    double rcut, atrenergy, v1, v2, f0, f1, f2, T1, T2, S1, S2, a, ndist;
    double intersections[5];
    struct vector vec1, vec2, vec_intrs, vec_mindist;
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_sum(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct vector mindist_segments(struct vector, double, struct vector, double, struct vector);
    void normalise(struct vector *);
    int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which, int patchnum);
    double fanglscale(double, struct ia_param *, int which);

    rcut = interact->param->rcut;
    // interact->halfl = interact->param->half_len[0];
    for(i=0;i<5;i++)
        intersections[i]=0;
    /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/
    intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same the opposite way: psc1 in patch of psc2*/
    for(i=0;i<5;i++)
        intersections[i]=0;
    vec1=vec_scale(interact->r_cm,-1.0);
    intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersections*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with the two intersection pieces calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance and attractive energy from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    if (ndist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
    //vec1=vec_scale(vec_mindist,-1.0);
    vec1=vec_perpproject(&vec1, &interact->part1->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part1->patchdir[patchnum1]);
    f1 = fanglscale(a,interact->param, 0+2*patchnum1);
    /*6- scaling function3: angular dependence of patch2*/
    vec1=vec_scale(vec_intrs,-1.0);
    //vec1=vec_scale(vec_mindist,1.0);
    vec1=vec_perpproject(&vec1, &interact->part2->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part2->patchdir[patchnum2]);
    f2 = fanglscale(a,interact->param, 1+2*patchnum2);
    /*7- put it all together*/
    atrenergy *=f0*f1*f2;
    //if (atrenergy < 0) printf ("attraction %f\n",atrenergy);
    // fprintf (stderr, "attraction %.8f \n",atrenergy);
    // exit(1);
    return atrenergy;
}

/*..............................................................................*/

/* Determines total energy of spherocylinders type PSC and CPSC */
double e_psc_cpsc(struct interacts * interact)
{
    double atrenergy, repenergy;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_psc_cpsc(struct interacts *,int,int);

    //DEBUG_SIM("do energy 23") ;
    closestdist(interact);
    repenergy = erepulsive(interact);
    //DEBUG_SIM("got the rep. energy");
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        BOOL firstCH=FALSE, secondCH=FALSE;
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;

        if ((interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == CHCPSC)|| (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[0] == TCHCPSC) )
            firstCH = TRUE;
        if ((interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == CHCPSC)|| (interact->param->geotype[1] == TCHPSC) || (interact->param->geotype[1] == TCHCPSC) )
            secondCH = TRUE;
        if(firstCH)
            interact->part1->dir = interact->part1->chdir[0];
        if(secondCH)
            interact->part2->dir = interact->part2->chdir[0];
        if ((firstCH) || (secondCH) ) {
            closestdist(interact);
        }
        atrenergy = eattractive_psc_cpsc(interact,0,0);
        /*addition of interaction of second patches*/
        if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) ) {
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) )
                firstT = TRUE;
            if ( (interact->param->geotype[1] == TCPSC) || (interact->param->geotype[1] == TCHCPSC) || (interact->param->geotype[1] == TPSC) || (interact->param->geotype[1] == TCHPSC) )
                secondT = TRUE;
            if (firstT) {
                if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); }
                if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); }
                if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[0]; closestdist(interact); }
                atrenergy += eattractive_psc_cpsc(interact,1,0);
            }
            if ( (firstT) && (secondT) ) {
                if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[1]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); }
                if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); }
                if (!firstCH && secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); }
                atrenergy += eattractive_psc_cpsc(interact,1,1);
            }
            if (secondT) {
                if (firstCH && secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); }
                if (firstCH && !secondCH) { interact->part1->dir = interact->part1->chdir[0]; interact->part2->dir = olddir2; closestdist(interact); }
                if (!firstCH && secondCH) { interact->part1->dir = olddir1; interact->part2->dir = interact->part2->chdir[1];
closestdist(interact); } atrenergy += eattractive_psc_cpsc(interact,0,1); } } if (firstCH) interact->part1->dir = olddir1; if (secondCH) interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* Determines attractive energy of spherocylinders type PSC and CPSC */ double eattractive_psc_cpsc(struct interacts * interact,int patchnum1,int patchnum2) { int i, intrs; double rcut, atrenergy, ndist; double v1, v2, f0, f1, f2, T1, T2, S1, S2, a; double intersections[5]; struct vector vec1, vec2, vec_intrs, vec_mindist; struct vector vec_sub(struct vector, struct vector); struct vector vec_sum(struct vector, struct vector); struct vector vec_create(double, double, double); struct vector vec_scale(struct vector, double); struct vector vec_perpproject(struct vector*, struct vector*); struct vector mindist_segments(struct vector, double, struct vector, double, struct vector); void normalise(struct vector *); int psc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum); int cpsc_intersect(struct particles *, struct particles *, double, double, struct vector, double *,double, struct ia_param *, int which,int patchnum); double fanglscale(double, struct ia_param *, int which); rcut = interact->param->rcut; //interact->halfl = interact->param->half_len[0]; //DEBUG_SIM("halfl = %lf", interact->halfl); for(i=0;i<5;i++) intersections[i]=0; BOOL first; if ( (interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)||(interact->param->geotype[0] == TPSC)||(interact->param->geotype[0] == TCHPSC) ){ first = TRUE; } else { first = FALSE; } //cospatch = param.pcanglsw; //cospatchinr = param.pcangl; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at. 
cut distance C*/
    //DEBUG_SIM("first intersection");
    if (first) {
        intrs=psc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    } else {
        intrs=cpsc_intersect(interact->part1,interact->part2,interact->param->half_len[0],interact->param->half_len[1],interact->r_cm, intersections, rcut, interact->param,0, patchnum1);
    }
    //DEBUG_SIM("first intersection: done");
    if (intrs <2){
        //DEBUG_SIM("No intersection :(");
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    }
    T1=intersections[0]; /*points on sc2*/
    T2=intersections[1];
    /*2- now do the same the opposite way: psc1 in patch of psc2*/
    for(i=0;i<5;i++)
        intersections[i]=0;
    //DEBUG_SIM("get vector");
    vec1=vec_scale(interact->r_cm,-1.0);
    //DEBUG_SIM("second intersection");
    if (first) {
        intrs=cpsc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    } else {
        intrs=psc_intersect(interact->part2,interact->part1,interact->param->half_len[1],interact->param->half_len[0],vec1, intersections, rcut, interact->param,1, patchnum2);
    }
    if (intrs <2)
        return 0.0; /*sc is all outside patch, attractive energy is 0*/
    S1=intersections[0]; /*points on sc1*/
    S2=intersections[1];
    /*3- scaling function1: dependence on the length of intersections*/
    v1=fabs(S1-S2);
    v2=fabs(T1-T2);
    f0=0.5*(v1+v2);
    /*4a- with the two intersection pieces calculate vector between their CM -this is for angular orientation*/
    vec1=vec_scale(interact->part1->dir,(S1+S2)*0.5);
    vec2=vec_scale(interact->part2->dir,(T1+T2)*0.5);
    vec_intrs.x=vec2.x-vec1.x-interact->r_cm.x;
    vec_intrs.y=vec2.y-vec1.y-interact->r_cm.y;
    vec_intrs.z=vec2.z-vec1.z-interact->r_cm.z;
    /*vec_intrs should be from sc1 to sc2*/
    // fprintf (stderr, "segments_CM: %.8f %.8f %.8f \n",vec_intrs.x,vec_intrs.y,vec_intrs.z);
    /*4b - calculate closest distance and attractive energy from it*/
    vec_mindist = mindist_segments(interact->part1->dir,v1,interact->part2->dir,v2,vec_intrs);
    // fprintf (stderr, "segments closest dist: %.8f %.8f %.8f \n",vec_mindist.x,vec_mindist.y,vec_mindist.z);
    ndist=sqrt(DOT(vec_mindist,vec_mindist));
    //dist=DOT(vec_intrs,vec_intrs);
    if (ndist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    //atrenergy = -1.0;
    else {
        atrenergy = cos(PIH*(ndist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*5- scaling function2: angular dependence of patch1*/
    vec1=vec_scale(vec_intrs,1.0);
    //vec1=vec_scale(vec_mindist,-1.0);
    vec1=vec_perpproject(&vec1, &interact->part1->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part1->patchdir[patchnum1]);
    f1 = fanglscale(a,interact->param, 0+2*patchnum1);
    /*6- scaling function3: angular dependence of patch2*/
    vec1=vec_scale(vec_intrs,-1.0);
    //vec1=vec_scale(vec_mindist,1.0);
    vec1=vec_perpproject(&vec1, &interact->part2->dir);
    normalise(&vec1);
    a = DOT(vec1,interact->part2->patchdir[patchnum2]);
    f2 = fanglscale(a,interact->param, 1+2*patchnum2);
    /*7- put it all together*/
    atrenergy *=f0*f1*f2;
    //if (atrenergy < 0) printf ("attraction %f\n",atrenergy);
    // fprintf (stderr, "attraction %.8f \n",atrenergy);
    // exit(1);
    return atrenergy;
}

/*..............................................................................*/

/*
 * Determines total energy of spherocylinder type 1 and sphere type 11
 */
double e_spa_sca(struct interacts * interact)
{
    double atrenergy, repenergy, b, f0, halfl;
    struct vector
vec_perpproject(struct vector *, struct vector *); void normalise(struct vector *); void closestdist(struct interacts *); double erepulsive(struct interacts *); double fanglscale(double, struct ia_param *, int which); //DEBUG printf ("do energy 111 \n\n"); closestdist(interact); repenergy = erepulsive(interact); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function for the length of spherocylinder within cutoff*/ if (interact->param->geotype [0] < SP) halfl = interact->param->half_len[0]; else halfl = interact->param->half_len[1]; b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist); if ( interact->contt + b > halfl ) f0 = halfl; else f0 = interact->contt + b; if ( interact->contt - b < -halfl ) f0 -= -halfl; else f0 -= interact->contt - b; atrenergy *= f0; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z); //exit(1); } return repenergy+atrenergy; } /*..............................................................................*/ /* * Determines total energy of spherocylinder type 2 and sphere type 11 */ double e_psc_spa(struct interacts * interact) { double atrenergy = 0.0, repenergy; void closestdist(struct interacts *); double erepulsive(struct interacts *); double eattractive_psc_spa(struct interacts *, int); //DEBUG_SIM("do energy 211") ; closestdist(interact); repenergy = erepulsive(interact); //DEBUG_SIM("got the rep. 
energy"); if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) ) atrenergy = 0.0; else { BOOL firstCH=FALSE, secondCH=FALSE; struct vector olddir1 = interact->part1->dir; struct vector olddir2 = interact->part2->dir; if ( (interact->param->geotype[0] == CHPSC) || (interact->param->geotype[0] == TCHPSC) ) firstCH = TRUE; if ( (interact->param->geotype[1] == CHPSC) || (interact->param->geotype[1] == TCHPSC) ) secondCH = TRUE; if(firstCH) interact->part1->dir = interact->part1->chdir[0]; if(secondCH) interact->part2->dir = interact->part2->chdir[0]; if ((firstCH) || (secondCH) ) { closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy = eattractive_psc_spa(interact,0); //addition of interaction of second patches if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) || (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) { BOOL firstT=FALSE, secondT=FALSE; if ( (interact->param->geotype[0] == TPSC) || (interact->param->geotype[0] == TCHPSC) ) firstT = TRUE; if ( (interact->param->geotype[1] == TPSC) ||(interact->param->geotype[1] == TCHPSC) ) secondT = TRUE; if (firstT) { if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_psc_spa(interact,1); } if (secondT) { if(secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); } if(interact->dist < interact->param->rcut) atrenergy += eattractive_psc_spa(interact,1); } if ( (firstT) && (secondT) ) { fprintf (stderr, "ERROR PSC should interact s SPA but got two PSC \n"); exit(1); } } interact->part1->dir = olddir1; interact->part2->dir = olddir2; } return repenergy+atrenergy; } /* * Determines attractive energy of spherocylinder type 2 and sphere type 11 */ double eattractive_psc_spa(struct interacts * interact, int patchnum1) { double atrenergy, a, b, f0, halfl; struct vector vec1; struct vector vec_perpproject(struct vector *, struct vector*); void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); int which; /*calculate closest distance attractive energy*/ if (interact->dist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon ; } /*scaling function: angular dependence of patch1*/ if (interact->param->geotype[0] < SP) { which = 0; vec1=vec_perpproject(&interact->distvec, &interact->part1->dir); normalise(&vec1); a = DOT(vec1,interact->part1->patchdir[patchnum1]); halfl=interact->param->half_len[0]; } else { which = 1; vec1=vec_perpproject(&interact->distvec, &interact->part2->dir); normalise(&vec1); a = DOT(vec1,interact->part2->patchdir[patchnum1]); halfl=interact->param->half_len[1]; } // caling function for the length of spherocylinder within cutoff b = sqrt(interact->param->rcut*interact->param->rcut - interact->dist*interact->dist); if ( interact->contt + b > halfl ) f0 = halfl; else f0 = interact->contt + b; if ( interact->contt - b < -halfl ) f0 -= -halfl; else f0 -= interact->contt - b; atrenergy *= fanglscale(a,interact->param, which)*f0; //if (atrenergy < 0) printf ("atraction %f\n",atrenergy); //fprintf (stderr, "attraction211 %.8f x: %.8f y: %.8f z: %.8f \n",atrenergy,vec1.x,vec1.y,vec1.z); //exit(1); return atrenergy; } /*..............................................................................*/ /* 
/* Determines total energy of spherocylinder type 3 and sphere type 11 */
double e_cpsc_spa(struct interacts * interact)
{
    double atrenergy, repenergy, halfl;
    void closestdist(struct interacts *);
    double erepulsive(struct interacts *);
    double eattractive_cpsc_spa(struct interacts *,int);

    //DEBUG_SIM("do energy 311") ;
    closestdist(interact);
    repenergy = erepulsive(interact);
    //DEBUG_SIM("got the rep. energy");
    if (interact->param->geotype[0] < SP) {
        halfl=interact->param->half_len[0];
    } else {
        halfl=interact->param->half_len[1];
    }
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) || (interact->contt > halfl) || (interact->contt < -halfl) )
        atrenergy = 0.0;
    else {
        BOOL firstCH=FALSE, secondCH=FALSE;
        struct vector olddir1 = interact->part1->dir;
        struct vector olddir2 = interact->part2->dir;

        if ( (interact->param->geotype[0] == CHCPSC) || (interact->param->geotype[0] == TCHCPSC) )
            firstCH = TRUE;
        if ( (interact->param->geotype[1] == CHCPSC) || (interact->param->geotype[1] == TCHCPSC) )
            secondCH = TRUE;
        if(firstCH)
            interact->part1->dir = interact->part1->chdir[0];
        if(secondCH)
            interact->part2->dir = interact->part2->chdir[0];
        if ((firstCH) || (secondCH) ) {
            closestdist(interact);
        }
        if(interact->dist < interact->param->rcut)
            atrenergy = eattractive_cpsc_spa(interact,0);
        /*addition of interaction of second patches*/
        if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) || (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) ) {
            BOOL firstT=FALSE, secondT=FALSE;
            if ( (interact->param->geotype[0] == TCPSC) || (interact->param->geotype[0] == TCHCPSC) )
                firstT = TRUE;
            if ( (interact->param->geotype[1] == TCPSC) ||(interact->param->geotype[1] == TCHCPSC) )
                secondT = TRUE;
            if (firstT) {
                if (firstCH) { interact->part1->dir = interact->part1->chdir[1]; closestdist(interact); }
                if(interact->dist < interact->param->rcut)
                    atrenergy += eattractive_cpsc_spa(interact,1);
            }
            if (secondT) {
                if(secondCH) { interact->part2->dir = interact->part2->chdir[1]; closestdist(interact); }
                if(interact->dist < interact->param->rcut)
                    atrenergy += eattractive_cpsc_spa(interact,1);
            }
            if ( (firstT) && (secondT) ) {
                fprintf (stderr, "ERROR: CPSC should interact with SPA but got two CPSC \n");
                exit(1);
            }
        }
        if (firstCH)
            interact->part1->dir = olddir1;
        if (secondCH)
            interact->part2->dir = olddir2;
    }
    return repenergy+atrenergy;
}

/* Determines attractive energy of spherocylinder type 3 and sphere type 11 */
double eattractive_cpsc_spa(struct interacts * interact,int patchnum1)
{
    double atrenergy, a, b, f0, halfl;
    struct vector vec1;
    int which;
    struct vector vec_perpproject(struct vector *, struct vector*);
    void normalise(struct vector *);
    double fanglscale(double, struct ia_param *, int which);

    /*if it is in cylindrical part c>-halfl and c<halfl*/
    /*calculate closest distance attractive energy*/
    if (interact->dist < interact->param->pdis)
        atrenergy = -interact->param->epsilon;
    else {
        atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
        atrenergy *= -atrenergy*interact->param->epsilon ;
    }
    /*scaling function: angular dependence of patch1*/
    if (interact->param->geotype[0] < SP) {
        which = 0;
        vec1=vec_perpproject(&interact->distvec, &interact->part1->dir);
        normalise(&vec1);
        a = DOT(vec1,interact->part1->patchdir[patchnum1]);
        halfl = interact->param->half_len[0];
    } else {
        which = 1;
        vec1=vec_perpproject(&interact->distvec, &interact->part2->dir);
        normalise(&vec1);
        a = DOT(vec1,interact->part2->patchdir[patchnum1]);
halfl = interact->param->half_len[1];
    }
    /*scaling function for the length of spherocylinder within cutoff*/
    b = sqrt(interact->param->rcut*interact->param->rcut-interact->dist*interact->dist);
    if ( interact->contt + b > halfl )
        f0 = halfl;
    else
        f0 = interact->contt + b;
    if ( interact->contt - b < -halfl )
        f0 -= -halfl;
    else
        f0 -= interact->contt - b;
    atrenergy *= fanglscale(a,interact->param, which)*f0;
    //if (atrenergy < 0) printf ("attraction %f\n",atrenergy);
    //fprintf (stderr, "attraction311 %.8f a: %.8f\n",atrenergy,a);
    //exit(1);
    return atrenergy;
}

/*..............................................................................*/

/* Determines total energy of two spherocylinders type 11 */
double e_2sca_or_2spa(struct interacts * interact)
{
    double repenergy, atrenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    if ( ( interact->dist > interact->param->rcut ) || ( interact->param->epsilon == 0.0 ) )
        atrenergy = 0.0;
    else {
        if (interact->dist < interact->param->pdis)
            atrenergy = -interact->param->epsilon;
        else {
            atrenergy = cos(PIH*(interact->dist-interact->param->pdis)/interact->param->pswitch);
            atrenergy *= -atrenergy*interact->param->epsilon ;
        }
    }
    return repenergy+atrenergy;
}

/*..............................................................................*/

/* Determines total energy with purely repulsive types */
double e_spn_or_scn(struct interacts * interact)
{
    double repenergy;
    double erepulsive(struct interacts *);
    void closestdist(struct interacts *);

    closestdist(interact);
    repenergy = erepulsive(interact);
    return repenergy;
}

/*..............................................................................*/

/* Determines repulsive energy of two spherocylinders */
double erepulsive(struct interacts * interact)
{
    double repenergy, en6;

    /* WCA repulsion */
    if (interact->dist > interact->param->rcutwca)
        repenergy = 0.0;
    else {
        en6 = pow((interact->param->sigma/interact->dist),6);
        repenergy = interact->param->epsilon * ( 4*en6*(en6-1) + 1.0);
    }
    //int Digs = 20;
    //printf("dist: %.*e, repenergy: %.*e\n",Digs, interact->dist, Digs, repenergy);
    return repenergy;
}

/*..............................................................................*/

/* Indicates not yet programmed interaction */
double enoexist(struct interacts * interact)
{
    double energy=0.0;

    fprintf (stderr, "ERROR: We have not programmed the interaction of types %d and %d\n", interact->part1->type,interact->part2->type);
    exit (1);
    return energy;
}

/* function for calculation of harmonic potential*/
double harmonic(double aktualvalue, double eqvalue, double springconst)
{
    return springconst*(aktualvalue-eqvalue)*(aktualvalue-eqvalue)*0.5;
}

/*..............................................................................*/

/* Determines bond energy */
double bondenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf)
{
    double energy=0.0, bondlength, halfl;
    struct vector vec1, vec2, vecbond;
    int * geotype = interact->param->geotype;
    struct vector image(struct vector, struct vector, struct vector);
    double harmonic(double, double, double);

    /*interaction with nearest neighbours -harmonic*/
    if ((topo->chainparam[conf->particle[num1].chaint]).bond1c >= 0) {
        if (num2 == topo->conlist[num1][1]) {
            /*num1 is connected to num2 by tail*/
            if ( (geotype[0] >= SP) && (geotype[1] >= SP) )
                energy =
harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c); else { if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x; vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y; vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z; if (geotype[1] < SP) halfl=interact->param->half_len[1]; else halfl = 0.0; vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x; vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y; vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z; vecbond = image(vec1, vec2, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c); } } else { if (num2 == topo->conlist[num1][0]) { /*num1 is connected to num2 by head*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c); else { if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x; vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y; vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z; if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x; vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y; vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z; vecbond = image(vec1, vec2, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c); } } } } /*interaction with second nearest neighbours -harmonic*/ if (topo->chainparam[conf->particle[num1].chaint].bond2c >= 0) { if (num2 == topo->conlist[num1][2]) { /*num1 is connected to num2 by tail*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c); else { vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c); } } else { if (num2 == topo->conlist[num1][3]) { /*num1 is connected to num2 by head*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c); else { vecbond = image(conf->particle[num1].pos, conf->particle[num2].pos, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,topo->chainparam[conf->particle[num1].chaint].bond2eq,topo->chainparam[conf->particle[num1].chaint].bond2c); } } } } /*interaction with nearest neighbours - direct harmonic bond*/ if ((topo->chainparam[conf->particle[num1].chaint]).bonddc > 0) { if (num2 == 
topo->conlist[num1][1]) { /*num1 is connected to num2 by tail*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bonddeq,topo->chainparam[conf->particle[num1].chaint].bonddc); else { if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec1.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x; vec1.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y; vec1.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z; if (geotype[1] < SP) halfl=interact->param->half_len[1]; else halfl = 0.0; vec2.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ; vec2.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ; vec2.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ; vecbond = image(vec1, vec2, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc); } } else { if (num2 == topo->conlist[num1][0]) { /*num1 is connected to num2 by head*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) energy = harmonic(interact->distcm,topo->chainparam[conf->particle[num1].chaint].bond1eq,topo->chainparam[conf->particle[num1].chaint].bond1c); else { if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec1.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.x ; vec1.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.y ; vec1.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * (halfl + topo->chainparam[conf->particle[num1].chaint].bonddeq) /conf->box.z ; if (geotype[0] < SP) halfl=interact->param->half_len[0]; else halfl = 0.0; vec2.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x; vec2.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y; vec2.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z; vecbond = image(vec1, vec2, conf->box); bondlength = sqrt(DOT(vecbond,vecbond)); energy = harmonic(bondlength,0.0,topo->chainparam[conf->particle[num1].chaint].bonddc); } } } } //printf("bondlength: %f\n",bondlength); // printf("bondener: %f\n",energy); return energy; } /*..............................................................................*/ /* Determines angle energy between spherocylinders */ double angleenergy(long num1, long num2, struct interacts * interact, struct topo * topo, struct conf * conf) { double energy=0.0, currangle, halfl; struct vector vec1, vec2; int * geotype = interact->param->geotype; struct vector image(struct vector, struct vector, struct vector); void normalise(struct vector *); double harmonic(double, double, double); /*angle interaction with nearest neighbours -harmonic*/ if ((topo->chainparam[conf->particle[num1].chaint]).angle1c >= 0) { if (num2 == topo->conlist[num1][0]) { /*num1 is connected to num2 by tail*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) /*spheres do not have this interaction*/ energy += 0.0; else { if (geotype[0] < SP) vec1 = conf->particle[num1].dir; else { 
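                    /* NOTE: a sphere has no body axis, so the reference direction for
                       the bond angle is built below from the spherocylinder end point
                       to the sphere centre via the minimum image convention; halfl
                       selects which end of the spherocylinder is used. */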
halfl=interact->param->half_len[1]; //sphere angle is defined versus the end of spherocylinder vec1.x=conf->particle[num2].pos.x - conf->particle[num2].dir.x * halfl /conf->box.x; vec1.y=conf->particle[num2].pos.y - conf->particle[num2].dir.y * halfl /conf->box.y; vec1.z=conf->particle[num2].pos.z - conf->particle[num2].dir.z * halfl /conf->box.z; vec1 = image(vec1, conf->particle[num1].pos, conf->box); } if (geotype[1] < SP) vec2 = conf->particle[num2].dir; else { halfl=interact->param->half_len[0]; vec2.x=conf->particle[num1].pos.x + conf->particle[num1].dir.x * halfl /conf->box.x; vec2.y=conf->particle[num1].pos.y + conf->particle[num1].dir.y * halfl /conf->box.y; vec2.z=conf->particle[num1].pos.z + conf->particle[num1].dir.z * halfl /conf->box.z; vec2 = image(vec2, conf->particle[num2].pos, conf->box); } normalise(&vec1); normalise(&vec2); currangle = acos(DOT(vec1,vec2)); energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle1eq,topo->chainparam[conf->particle[num1].chaint].angle1c); } } else { if (num2 == topo->conlist[num1][1]) { /*num1 is connected to num2 by head*/ if ( (geotype[0] >= SP) && (geotype[1] >= SP) ) /*spheres do not have this interaction*/ energy += 0.0; else { if (geotype[0] < SP) vec1 = conf->particle[num1].dir; else { halfl=interact->param->half_len[1]; //sphere angle is defined versus the end of spherocylinder vec1.x=conf->particle[num2].pos.x + conf->particle[num2].dir.x * halfl /conf->box.x; vec1.y=conf->particle[num2].pos.y + conf->particle[num2].dir.y * halfl /conf->box.y; vec1.z=conf->particle[num2].pos.z + conf->particle[num2].dir.z * halfl /conf->box.z; vec1 = image(vec1, conf->particle[num1].pos, conf->box); } if (geotype[1] < SP) vec2 = conf->particle[num2].dir; else { halfl=interact->param->half_len[0]; vec2.x=conf->particle[num1].pos.x - conf->particle[num1].dir.x * halfl /conf->box.x; vec2.y=conf->particle[num1].pos.y - conf->particle[num1].dir.y * halfl /conf->box.y; vec2.z=conf->particle[num1].pos.z - conf->particle[num1].dir.z * halfl /conf->box.z; vec2 = image(vec2, conf->particle[num2].pos, conf->box); } normalise(&vec1); normalise(&vec2); currangle = acos(DOT(vec1,vec2)); energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle1eq,topo->chainparam[conf->particle[num2].chaint].angle1c); } } } } /*interaction between the orientation of spherocylinders patches -harmonic*/ if (topo->chainparam[conf->particle[num1].chaint].angle2c >= 0) { if (num2 == topo->conlist[num1][0]) { /*num1 is connected to num2 by tail*/ if ( (geotype[0] < SP) && (geotype[1] < SP) ) { currangle = acos(DOT(conf->particle[num1].patchdir[0],conf->particle[num2].patchdir[0]) - DOT(conf->particle[num1].dir,conf->particle[num2].patchdir[0]) ); energy += harmonic(currangle,topo->chainparam[conf->particle[num1].chaint].angle2eq,topo->chainparam[conf->particle[num1].chaint].angle2c); } else { energy += 0.0; } } else { if (num2 == topo->conlist[num1][1]) { /*num1 is connected to num2 by head*/ if ( (geotype[0] < SP) && (geotype[1] < SP) ) { currangle = acos(DOT(conf->particle[num2].patchdir[0],conf->particle[num1].patchdir[0]) - DOT(conf->particle[num2].dir,conf->particle[num1].patchdir[0]) ); energy += harmonic(currangle,topo->chainparam[conf->particle[num2].chaint].angle2eq,topo->chainparam[conf->particle[num2].chaint].angle2c); } else { energy += 0.0; } } } } // printf("angleener: %f\n",energy); return energy; } /* cluses distance calculation*/ void closestdist(struct interacts * interact) { double c, d, halfl; struct vector 
mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm);
    double linemin(double, double);

    //printf("we have %d %d ",interact->param->geotype[0],interact->param->geotype[1] );
    if ((interact->param->geotype[0] >= SP) && (interact->param->geotype[1] >= SP)) {
        /*we have two spheres - most common, do nothing*/
        //printf("we have two spheres ");
        interact->distvec = interact->r_cm;
        interact->dist = sqrt(interact->dotrcm);
        interact->distcm = interact->dist;
    } else {
        if ((interact->param->geotype[0] < SP) && (interact->param->geotype[1] < SP)) {
            /*we have two spherocylinders*/
            interact->distvec = mindist_segments(interact->part1->dir,interact->param->half_len[0], interact->part2->dir, interact->param->half_len[1], interact->r_cm);
            interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
        } else {
            if (interact->param->geotype[0] < SP) {
                /*We have one spherocylinder - it is the first one*/
                halfl=interact->param->half_len[0];
                /*finding closest vector from spherocylinder to sphere*/
                c = DOT(interact->part1->dir,interact->r_cm);
                if (c >= halfl)
                    d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                interact->contt = c;
                interact->distvec.x = - interact->r_cm.x + interact->part1->dir.x * d;
                interact->distvec.y = - interact->r_cm.y + interact->part1->dir.y * d;
                interact->distvec.z = - interact->r_cm.z + interact->part1->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            } else {
                /*last option: the first one is a sphere, the second one a spherocylinder*/
                halfl=interact->param->half_len[1];
                /*finding closest vector from spherocylinder to sphere*/
                c = DOT(interact->part2->dir,interact->r_cm);
                if (c >= halfl)
                    d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                interact->contt = -c;
                interact->distvec.x = interact->r_cm.x - interact->part2->dir.x * d;
                interact->distvec.y = interact->r_cm.y - interact->part2->dir.y * d;
                interact->distvec.z = interact->r_cm.z - interact->part2->dir.z * d;
                interact->dist=sqrt(DOT(interact->distvec,interact->distvec));
            }
        }
    }
}

/*..............................................................................*/

/* Determines energy of two particles */
double paire(long num1, long num2, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf)
{
    double energy=0.0;          /* energy*/
    struct vector r_cm;         /* Vector between centres of mass from part2 to part1*/
    struct interacts interact;  /*interaction parameters*/
    double bondenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);
    double angleenergy(long, long, struct interacts *, struct topo * topo, struct conf * conf);

    /*Placing interacting particles in the unit box and finding the vector connecting their CM*/
    /*r_cm = image(part1.pos, part2.pos, box); explicit statement below for performance optimization*/
    r_cm.x = conf->particle[num1].pos.x - conf->particle[num2].pos.x;
    r_cm.y = conf->particle[num1].pos.y - conf->particle[num2].pos.y;
    r_cm.z = conf->particle[num1].pos.z - conf->particle[num2].pos.z;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x-0.5) ) );
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x+0.5) ) );
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y-0.5) ) );
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y+0.5) ) );
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z-0.5) ) );
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z+0.5) ) );
    interact.dotrcm = DOT(r_cm,r_cm);
    if ( interact.dotrcm > topo->sqmaxcut)
return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */ interact.r_cm=r_cm; interact.contt = 0; interact.distvec.x = 0; interact.distvec.y = 0; interact.distvec.z = 0; interact.box = conf->box; interact.part1 = &conf->particle[num1]; interact.part2 = &conf->particle[num2]; interact.param = topo->ia_params[conf->particle[num1].type] + conf->particle[num2].type; if(intfce[conf->particle[num1].type][conf->particle[num2].type] == NULL){ fprintf(stderr, "interaction function for type %d and %d not defined!\n", conf->particle[num1].type, conf->particle[num2].type); } energy = (*intfce[conf->particle[num1].type][conf->particle[num2].type])( &interact); //printf("num: %ld %ld e: %f dist: %f",num1,num2,energy,interact.dist); energy += bondenergy ( num1, num2, &interact, topo, conf); energy += angleenergy ( num1, num2, &interact, topo, conf); //printf(" e: %f\n",energy); return energy; } /*...........................................................................*/ /*Calculates interaction of target particle and external field version 2 calculate projection of spherocylinder in direction of patch and calculate interacting line segment within cutoff */ double extere2 (long target, struct topo * topo, struct conf * conf) { double repenergy=0.0,atrenergy=0.0; /* energy*/ double rcmz; /* z distance between*/ double ndist; /* distance for CM of interacting line segment*/ double interendz; /* z coordinate of interaction end*/ struct interacts interact; /* interaction parameters*/ double orient; double halfl; BOOL positive, orientin; struct vector olddir; struct vector project; /*vector for projection down to plane */ double erepulsive(struct interacts *); // struct vector vec_perpproject(struct vector*, struct vector*); // void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project); double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int, double ); /* calcualte distance to center of mass*/ if ( conf->particle[target].pos.z < 0 ) { rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z - 0.5) ) ); } else { rcmz = conf->box.z * (conf->particle[target].pos.z - (double)( (long)(conf->particle[target].pos.z + 0.5) ) ); } project.x=0; project.y=0; if (rcmz < 0) { interact.dist = -rcmz; positive = FALSE; interendz = -1.0; project.z = 1.0; } else { interact.dist = rcmz; positive = TRUE; interendz = 1.0; project.z = -1.0; } interact.dotrcm = rcmz * rcmz; if ( interact.dotrcm > topo->exter.sqmaxcut) return 0.0; /* distance so far that even spherocylinders cannot be within cutoff */ interact.distvec.z = interact.r_cm.z; interact.distcm = interact.dist; interact.box = conf->box; interact.part1 = &conf->particle[target]; interact.param = &topo->exter.interactions[conf->particle[target].type]; halfl = 0.5* topo->exter.interactions[conf->particle[target].type].len[0]; ndist = interact.dist; orientin = TRUE; orient = 0.0; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); /* now we have closest distance so we can calculate repulsion*/ repenergy = erepulsive(&interact); //printf("dist: %f",interact.dist); /*save chiral stuff*/ olddir = interact.part1->dir; if ((interact.param->geotype[0] == 
CHCPSC)||(interact.param->geotype[0] == CHPSC)) { interact.part1->dir = interact.part1->chdir[0]; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); } if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[0].z >0)&&(positive) ) || ( (interact.part1->patchdir[0].z <0)&&(!(positive)) ) ) atrenergy = 0.0; else { atrenergy = exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,0,halfl); } if ((interact.param->geotype[0] == TCPSC)||(interact.param->geotype[0] == TPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) { if ((interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC)) { interact.part1->dir = interact.part1->chdir[1]; exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); } exter2_closestdist(&interact,&positive,&orientin,&orient,&rcmz,&interendz,&project); if (( interact.dist > interact.param->rcut ) || (interact.param->epsilon == 0.0 ) || ( (interact.part1->patchdir[1].z >0)&&(positive) ) || ( (interact.part1->patchdir[1].z <0)&&(!(positive)) ) ) atrenergy += 0.0; else { atrenergy += exter2_atre(&interact,&orientin,&rcmz,&interendz,&positive,orient,&project,&ndist,1,halfl); } } if ((interact.param->geotype[0] == CHCPSC)||(interact.param->geotype[0] == CHPSC)|| (interact.param->geotype[0] == TCHCPSC)||(interact.param->geotype[0] == TCHPSC) ) { interact.part1->dir = olddir; } //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,repenergy+atrenergy); return repenergy+atrenergy; } double exter2_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch, double halfl) { struct vector pbeg,pend; /* projected spherocylinder begining and end*/ double a,length1,length2, f0,f1; struct vector cm1,cm2; /* centrar of interacting segments */ int line; struct vector partbeg,partend; /*closest and furthest point of particle*/ struct vector inters; double atrenergy=0.0; int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend); int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend); /*interaction with PATCHY SPHEROCYLINDERS*/ if ((interact->param->geotype[0] < SP)&&(interact->param->geotype[0] > SCA)) { //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z); //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[0].x,interact->part1->patchdir[0].y,interact->part1->patchdir[0].z); /* calculate position of closest and furthest point (begining and end of spherocylinder)*/ a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */ partbeg.x = a * interact->part1->dir.x * halfl; partbeg.y = a * interact->part1->dir.y * halfl; partbeg.z = *rcmz + a * interact->part1->dir.z *halfl; partend.x = - a * interact->part1->dir.x * halfl; partend.y = - a * interact->part1->dir.y * halfl; partend.z = *rcmz - a * interact->part1->dir.z * halfl; //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z); /*calculate interacting line segment and its cm of spherocylinder*/ /*calculate end point z*/ if ( 
(interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < 2.0*halfl ){ /*if cutoff goes through spherocylinder the end point is at cutoff*/ *interendz *= interact->param->rcut; } else { /*endpoint is at the end of spherocylinders*/ *interendz = partend.z; } /*calculate CM of interacting line segment of spherocylinder*/ if (*positive) { cm1.z = AVER(*interendz,interact->dist); } else { cm1.z = AVER(*interendz,-interact->dist); } if (interact->part1->dir.z != 0.0 ) { a = (*interendz - cm1.z ) / interact->part1->dir.z; length1= -orient*2.0*a; a = a + orient*halfl; } else { a = 0.0; length1 = 2.0*halfl; } //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact.dist); cm1.x = interact->part1->dir.x * a; cm1.y = interact->part1->dir.y * a; /* we have interacting segment*/ if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) { /*CPSC type*/ if ( ((*interendz >= interact->dist)&&(*positive)) || ((*interendz <= -interact->dist)&&(!(*positive))) ){ /*test if projection is not all out of interaction*/ line = cpsc_wall(&pbeg,&pend,project,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } else { line = 0; } } else { /*PSC and CHPSC interaction with wall */ line = psc_wall(&pbeg,&pend,project,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } if (line > 0) { /*cm2 by average begining and end*/ cm2.x = AVER(pbeg.x,pend.x); cm2.y = AVER(pbeg.y,pend.y); cm2.z = 0.0; /*length by size of end-benining*/ length2 = sqrt( (pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) ); inters.x = cm2.x - cm1.x; inters.y = cm2.y - cm1.y; inters.z = cm2.z - cm1.z; //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z); *ndist = sqrt(DOT(inters,inters)); if (*ndist < interact->param->pdis) { atrenergy = -interact->param->epsilon; } else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /* scaling function1: dependence on the length of intersetions plus*/ f0=(length1 + length2)*0.5; /*scaling with angle*/ f1 = fabs(interact->part1->patchdir[numpatch].z); atrenergy *= f0*f1; //printf(" %f %f %f %f %f %f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist); //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y); } else { atrenergy = 0.0; } } else { if (*ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /*add wall scaling wall area/ particle arear.. 
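(specifically, the attraction below is multiplied by (rcut^2 - ndist^2)/sigma^2,
the area of the cutoff disc on the wall relative to the particle cross-section)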
to reflect that we have a wall not sphere */ atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ; } return atrenergy; } void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project) { if (*rcmz < 0) { interact->dist = -(*rcmz); *positive = FALSE; *interendz = -1.0; project->z = 1.0; } else { interact->dist = (*rcmz); *positive = TRUE; *interendz = 1.0; project->z = -1.0; } /*psc closest is allways end closer to wall*/ if (interact->param->geotype[0] < SP ){ /*calculate closest point distance*/ if (interact->part1->dir.z > 0) { if (*positive) { *orientin = FALSE; *orient = -1.0; interact->dist = *rcmz -interact->part1->dir.z * interact->param->half_len[0]; } else { *orientin = TRUE; *orient = 1.0; interact->dist = -( *rcmz + interact->part1->dir.z * interact->param->half_len[0]); } } else { if (*positive) { *orientin = TRUE; *orient = 1.0; interact->dist = *rcmz + interact->part1->dir.z * interact->param->half_len[0]; } else { *orientin = FALSE; *orient = -1.0; interact->dist = -( *rcmz -interact->part1->dir.z * interact->param->half_len[0]); } } } } /*...........................................................................*/ /*Calculates interaction of target particle and external field calculate projection of patch of spherocylinder on wall evaluate intersection area and calculate interaction from that */ double exter_atre(struct interacts * interact,int *orientin, double *rcmz, double *interendz, BOOL *positive, double orient,struct vector *project, double *ndist,int numpatch,double halfl) { double area,a,b,c,r2; double atrenergy=0.0; /* energy*/ BOOL countend; struct vector cm1,cm2; /* centrar of interacting segments */ struct vector pbeg,pend; /* projected spherocylinder begining and end*/ struct vector inters,newdir; struct vector pbeg1,pend1,pbeg2,pend2,pextr1,pextr2,pextr3,pextr4; /*additinal point of projected patch for calculation of area */ double length1, cuttoproject, f0; int line, line1, line2,extra; struct vector partbeg,partend; /*closest and furthest point of particle*/ double erepulsive(struct interacts *); struct vector vec_perpproject(struct vector*, struct vector*); void normalise(struct vector *); double fanglscale(double, struct ia_param *, int which); struct vector vec_create(double, double, double); double areaeightpoints(struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*,struct vector*); int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, double* halfl,BOOL* orientin,BOOL* positive, double* rcmz, double * cut, struct vector* partbeg, struct vector* partend); int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cut, struct vector* partbeg, struct vector* partend); int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4, struct vector* projectdir, struct vector* partdir, double * cutdist, struct vector *partbeg, struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin); void exter2_closestdist(struct interacts * interact, BOOL *positive, BOOL *orientin, double *orient, double *rcmz,double *interendz, struct vector *project); /*interaction with PATCHY SPHEROCYLINDERS*/ if ((interact->param->geotype[0] < 
SP)&&(interact->param->geotype[0] > SCA)) { //printf("partdir: %f %f %f \n ",interact->part1->dir.x,interact->part1->dir.y,interact->part1->dir.z); //printf("patchdir: %f %f %f \n ",interact->part1->patchdir[numpatch].x,interact->part1->patchdir[numpatch].y,interact->part1->patchdir[numpatch].z); /* calculate position of closest and furthest point (begining and end of spherocylinder)*/ a = (*orientin-0.5)*2; /*if orientin a =1 else a=-1 */ partbeg.x = a * interact->part1->dir.x * halfl; partbeg.y = a * interact->part1->dir.y * halfl; partbeg.z = *rcmz + a * interact->part1->dir.z * halfl; partend.x = - a * interact->part1->dir.x * halfl; partend.y = - a * interact->part1->dir.y * halfl; partend.z = *rcmz - a * interact->part1->dir.z * halfl; //printf("partbeg %f %f %f partend %f %f %f \n",partbeg.x,partbeg.y,partbeg.z,partend.x,partend.y,partend.z); /*calculate interacting line segment and its cm of spherocylinder*/ /*calculate end point z*/ if ( (interact->param->rcut - interact->dist)/fabs(interact->part1->dir.z) < halfl*2.0 ){ /*if cutoff goes through spherocylinder the end point is at cutoff*/ *interendz *= interact->param->rcut; } else { /*endpoint is at the end of spherocylinders*/ *interendz = partend.z; } /*calculate CM of interacting line segment of spherocylinder*/ if (*positive) { cm1.z = AVER(*interendz,interact->dist); } else { cm1.z = AVER(*interendz,-interact->dist); } if (interact->part1->dir.z != 0.0 ) { a = (*interendz - cm1.z ) / interact->part1->dir.z; length1= -orient*2.0*a; a = a + orient*halfl; } else { a = 0.0; length1 = 2.0*halfl; } //printf("len1: %f rcm %f interz %f cutoff %f \n",length1,rcmz, interendz,interact->dist); cm1.x = interact->part1->dir.x * a; cm1.y = interact->part1->dir.y * a; /*calculate projection on wall as infinite line and make it interacting segment*/ if (interact->part1->patchdir[numpatch].z != 0) { cuttoproject = -interact->param->rcut*interact->part1->patchdir[numpatch].z; /*z coordinate of point where projection is in cut distance*/ if ( ((partend.z < cuttoproject)&&(*positive)) || ((cuttoproject < partend.z)&&(!(*positive))) ){ cuttoproject = partend.z; } } else { cuttoproject = partbeg.z; } //printf("cutproject %f \n",cuttoproject); //printf("cm1 %f %f %f \n",cm1.x, cm1.y,cm1.z ); /* we have interacting segment*/ if ((interact->param->geotype[0] == CPSC)||(interact->param->geotype[0] == CHCPSC)) { /*CPSC type*/ if ( ((cuttoproject >= interact->dist)&&(*positive)) || ((cuttoproject <= -interact->dist)&&(!(*positive))) ){ /*test if projection is not all out of interaction*/ line = cpsc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } else { line = 0; } } else { /*PSC and CHPSC interaction with wall */ line = psc_wall(&pbeg,&pend,&interact->part1->patchdir[numpatch],&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); //printf("line: %d beg %f %f end %f %f \n",line,pbeg.x,pbeg.y,pend.x,pend.y); } if (line > 0) { area = 0.0; /*project cutoff boudaries*/ if (line == 2 ) { /*if projection end is on sphere of begining don't care about cylinder cutoff*/ extra = 0; } else { extra = cutprojectatwall(&pextr1, &pextr2, &pextr3, &pextr4, &interact->part1->patchdir[numpatch], \ &interact->part1->dir, &interact->param->rcut, &partbeg, &partend,&pend,&cuttoproject,orientin); } //printf("extr1: %d %f %f extr2 %f %f extr3 %f %f 
extr4 %f %f \n",extra,pextr1.x,pextr1.y,pextr2.x,pextr2.y,pextr3.x,pextr3.y,pextr4.x,pextr4.y); /*project patch boundaries on the first side*/ newdir=interact->part1->patchsides[0+2*numpatch]; line1 = cpsc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) { line1 = psc_wall(&pbeg1,&pend1,&newdir,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); } //printf("line1: %d beg1 %f %f end1 %f %f \n",line1,pbeg1.x,pbeg1.y,pend1.x,pend1.y); /*project patch boundaries on the second side*/ newdir=interact->part1->patchsides[1+2*numpatch]; line2 = cpsc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \ &interact->param->half_len[0],orientin,positive,rcmz,&interact->param->rcut,&partbeg,&partend); if ( ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) ) { line2 = psc_wall(&pbeg2,&pend2,&newdir,&interact->part1->dir, \ positive,&interact->param->rcut,&partbeg,&partend); } //printf("line2: %d beg2 %f %f end2 %f %f \n",line2,pbeg2.x,pbeg2.y,pend2.x,pend2.y); /*calculate area*/ if (extra == 0) { /*thish should only happen when there is PSC interacting only with end*/ if (line1 == 0) { if (line2==0) { /*circle around middle-pbeg*/ area = PI*( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); } else{ /* circle around middle-pbeg minus circle segment*/ a = AVER(pbeg2.x,pend2.x); b = AVER(pbeg2.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c); } } else { if (line2==0) { /* circle around middle-pbeg minus circle segment*/ a = AVER(pbeg1.x,pend1.x); b = AVER(pbeg1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*(PI-acos(sqrt(c/r2))) + sqrt(r2*c-c*c); } else { //area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */ /*circle minus two circle segments*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area = r2*PI; a = AVER(pbeg1.x,pend1.x); b = AVER(pbeg1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c); a = AVER(pbeg2.x,pend2.x); b = AVER(pbeg2.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += -r2*acos(sqrt(c/r2)) + sqrt(r2*c-c*c); } } } else { b = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend.y)- (pextr2.x-pend.x)*(pextr4.y-pextr2.y));/*pend on 42*/ c = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend.y)- (pextr3.x-pend.x)*(pextr1.y-pextr3.y));/*pend on 13*/ if ( ( b< ZEROTOL) || ( c< ZEROTOL) ) countend = FALSE; else countend = TRUE; if (line1 == 0) { if (line2 == 0) { if ( countend ) { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pextr1,NULL,NULL);/* B 2 4 E 3 1 */ } else area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pextr1,NULL,NULL,NULL);/* B 2 4 3 1 */ } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend2 on 42*/ 
{ if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */ } } else { a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend2.y)- (pextr3.x-pend2.x)*(pextr1.y-pextr3.y)); if ( a< ZEROTOL) /*pend2 on 13*/ { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr1,NULL,NULL,NULL,NULL); /* B B2 E2 1 */ } else { /*pend2 on 34 or on begining sphere of psc*/ if (line2 == 2) { if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pextr1,NULL); /* B B2 E2 4 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 4 3 1 */ } } else { if ( countend ) { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pextr1,NULL,NULL); /* B B2 E2 E 3 1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pextr1,NULL,NULL,NULL); /* B B2 E2 3 1 */ } } } } } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend1.y)- (pextr2.x-pend1.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend1 on 42*/ { if (line2 == 0) { area = areaeightpoints(&pbeg,&pextr2,&pend1,&pbeg1,NULL,NULL,NULL,NULL); /* B 2 E1 B1 */ } else { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } } else { a = fabs((pextr1.x-pextr3.x)*(pextr3.y-pend1.y)- (pextr3.x-pend1.x)*(pextr1.y-pextr3.y)); if ( a< ZEROTOL) /*pend1 on 13*/ { if (line2 == 0) { if (countend) { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1 */ } else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1 */ } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /*pend2 on 42*/ { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { a = fabs((pextr3.x-pextr1.x)*(pextr1.y-pend2.y)- (pextr1.x-pend2.x)*(pextr3.y-pextr1.y)); if ( a< ZEROTOL) /*pend2 on 31*/ { area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } else { /*pend2 close to 34 or on begining sphere of psc*/ if (line2 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */ } } } } } else {/*pend1 close to 34 or on beging sphere for psc*/ if (line2 == 0) { if (line1 ==2) { if (countend) area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B 2 4 E 3 E1 B1*/ else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B 2 4 3 E1 B1*/ } } else { if (countend) area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend,&pend1,&pbeg1,NULL,NULL); /* B 2 4 E E1 B1*/ else { area = areaeightpoints(&pbeg,&pextr2,&pextr4,&pend1,&pbeg1,NULL,NULL,NULL); /* B 2 4 E1 B1*/ } } } else { a = fabs((pextr4.x-pextr2.x)*(pextr2.y-pend2.y)- (pextr2.x-pend2.x)*(pextr4.y-pextr2.y)); if ( a< ZEROTOL) /* pend2 on 42 */ { if (countend) area = 
areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */ } else { /*pend1 and pend2 close to 34 or on beging sphere for psc*/ if (line2 == 2) { if (line1 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pextr3,&pend1,&pbeg1); /* B B2 E2 4 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 4 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend,&pend1,&pbeg1,NULL); /* B B2 E2 4 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr4,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 4 E1 B1 */ } } else { if (line1 == 2) { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pextr3,&pend1,&pbeg1,NULL); /* B B2 E2 E 3 E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pextr3,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 3 E1 B1 */ } else { if (countend) area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend,&pend1,&pbeg1,NULL,NULL); /* B B2 E2 E E1 B1 */ else area = areaeightpoints(&pbeg,&pbeg2,&pend2,&pend1,&pbeg1,NULL,NULL,NULL); /* B B2 E2 E1 B1 */ } } } } } } } /*extra != 0*/ if ((interact->param->geotype[0] == PSC)||(interact->param->geotype[0] == CHPSC)) { if (line1==2) { /* add circle segment*/ a = AVER(pextr1.x,pend1.x); /*end to cutoff - pextr1 ,pend1 */ b = AVER(pextr1.y,pend1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pend1.x)*(partbeg.x-pend1.x) + (partbeg.y-pend1.y)*(partbeg.y-pend1.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); a = AVER(pbeg.x,pbeg1.x); /* between beginings - pbeg ,pbeg1 */ b = AVER(pbeg.y,pbeg1.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } else { if (line1==0) { /* add circle segment*/ a = AVER(pextr1.x,pbeg.x); /* begining to cutoff*/ b = AVER(pextr1.y,pbeg.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } } if (line2==2) { /* add circle segment*/ a = AVER(pextr3.x,pend2.x); /*end to cutoff - pextr3 ,pend2 */ b = AVER(pextr3.y,pend2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pend2.x)*(partbeg.x-pend2.x) + (partbeg.y-pend2.y)*(partbeg.y-pend2.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); a = AVER(pbeg.x,pbeg2.x); /* between beginings - pbeg ,pbeg2 */ b = AVER(pbeg.y,pbeg2.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } else { if (line2==0) { /* add circle segment*/ a = AVER(pextr3.x,pbeg.x); /* begining to cutoff*/ b = AVER(pextr3.y,pbeg.y); c = (partbeg.x-a)*(partbeg.x-a) + (partbeg.y-b)*(partbeg.y-b); /*height of triangle to segment*/ r2 = ( (partbeg.x-pbeg.x)*(partbeg.x-pbeg.x) + (partbeg.y-pbeg.y)*(partbeg.y-pbeg.y)); /*radius squared*/ area += r2*acos(sqrt(c/r2)) - sqrt(r2*c-c*c); } } } } /*area finished*/ /*cm2 by average begining and end*/ cm2.x = AVER(pbeg.x,pend.x); cm2.y = AVER(pbeg.y,pend.y); cm2.z = 0.0; /*length by size of end-benining*/ //length2 = sqrt( 
(pend.x-pbeg.x)*(pend.x-pbeg.x)+(pend.y-pbeg.y)*(pend.y-pbeg.y) ); inters.x = cm2.x - cm1.x; inters.y = cm2.y - cm1.y; inters.z = cm2.z - cm1.z; //printf("cm2 %f %f %f inters %f %f %f \n",cm2.x,cm2.y,cm2.z,inters.x,inters.y,inters.z); *ndist = sqrt(DOT(inters,inters)); if (*ndist < interact->param->pdis) { atrenergy = -interact->param->epsilon; } else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /* scaling function1: dependence on the length of intersetions plus SCALING WITH AREA*/ f0=(length1 + area / interact->param->sigma)*0.5; atrenergy *= f0; //printf(" %f %f %f %f %f %f %f %d %d %d \n",conf->particle[target].pos.z*conf->box.z,atrenergy, area, length1, length2,f0,ndist,extra,line1,line2); //printf("%f %f %f %f\n",pbeg.x,pbeg.y,pend.x,pend.y); //printf("%f %f %f %f %f %f\n",pbeg2.x,pend2.y,pextr2.x,pextr2.y,pextr1.x,pextr1.y); } else { atrenergy = 0.0; } } else { if (*ndist < interact->param->pdis) atrenergy = -interact->param->epsilon; else { atrenergy= cos(PIH*(*ndist-interact->param->pdis)/interact->param->pswitch); atrenergy *= -atrenergy*interact->param->epsilon; } /*add wall scaling wall area/ particle arear.. to reflect that we have a wall not sphere */ atrenergy *= (interact->param->rcut*interact->param->rcut - (*ndist)*(*ndist))/(interact->param->sigma*interact->param->sigma) ; } //printf("%f %f \n",conf->particle[target].pos.z*conf->box.z,atrenergy); return atrenergy; } /*..............................................................................*/ /* Initializes the array with the pointers to the energy function */ void init_intfce(double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo){ // NB // Fill in the names of the functions for calculating the // interaction energy long geotype, other_geotype; int i, j; for(i = 0; i < MAXT; i++){ for(j = 0; j < MAXT; j++){ /* Initialize them as not existing */ intfce[i][j] = &enoexist; geotype = topo->ia_params[i][j].geotype[0]; other_geotype = topo->ia_params[i][j].geotype[1]; if ( ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ) || ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ) ) { intfce[i][j] = &e_psc_cpsc; } if ( (geotype == CHCPSC || geotype == CPSC || geotype == TCHCPSC || geotype == TCPSC) && (other_geotype == CHCPSC || other_geotype == CPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) ){ intfce[i][j] = &e_cpsc_cpsc; } if ( (geotype == CHPSC || geotype == PSC || geotype == TCHPSC || geotype == TPSC) && (other_geotype == CHPSC || other_geotype == PSC || other_geotype == TCHPSC || other_geotype == TPSC) ){ intfce[i][j] = &e_psc_psc; } if(geotype == SCN || geotype == SPN || other_geotype == SCN || other_geotype == SPN){ intfce[i][j] = &e_spn_or_scn; } if((geotype == SCA && other_geotype == SCA) || (geotype == SPA && other_geotype == SPA)){ intfce[i][j] = &e_2sca_or_2spa; } if((geotype == SCA && other_geotype == SPA) || (geotype == SPA && other_geotype == SCA)){ intfce[i][j] = &e_spa_sca; } if(( (geotype == PSC || geotype == CHPSC || geotype == TCHPSC || geotype == TPSC) && other_geotype == SPA) || (geotype == SPA && (other_geotype == PSC||other_geotype == CHPSC || other_geotype == TCHPSC || other_geotype == TPSC) )){ intfce[i][j] = 
&e_psc_spa;
            }
            if (( (geotype == CPSC ||geotype == CHCPSC || geotype == TCHCPSC || geotype == TCPSC) && other_geotype == SPA)
              || (geotype == SPA && (other_geotype == CPSC||other_geotype == CHCPSC || other_geotype == TCHCPSC || other_geotype == TCPSC) )){
                intfce[i][j] = &e_cpsc_spa;
            }
        }
    }
}

/*..............................................................................*/

/* Compare the energy change to the temperature and, based on the Boltzmann
   probability, return 0 to accept or 1 to reject the move.
   This is the Metropolis criterion: a move that lowers the energy is always
   accepted; otherwise it is accepted with probability exp(-(Enew-Eold)/T). */
int movetry(double energyold, double energynew, double temperature)
{
    double ran2(long *);

    /*DEBUG   printf (" Move trial: %13.8lf %13.8lf %13.8lf %13.8lf\n",
      energynew, energyold, temperature, ran2(&seed));*/

    if (energynew <= energyold )
        return 0;
    else if (exp(-1.0*(energynew-energyold)/temperature) > ran2(&seed))
        return 0;
    else
        return 1;
}

/*..............................................................................*/

/*
 * Calculate the different energy contributions. This is a merge of the different
 * energy calculation functions (energyone, -chain, -all)
 * 0: all
 * 1: one
 * 2: chain
 */
double calc_energy(long target, double (* intfce[MAXT][MAXT])(struct interacts *),
        int mode, struct topo * topo, struct conf * conf, struct sim * sim, int chainnum)
{
    long i=0,j=0;
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);
    // double extere(long, struct topo * topo, struct conf * conf);
    double extere2(long, struct topo * topo, struct conf * conf);
    //DEBUG_SIM("Calculate the energy with mode %d", mode)
    double energy = 0;

    /* Calculates the energy between particle "target" and the rest. Returns the energy. */
    if(mode == 1){
        if (sim->pairlist_update) {
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = 0; i < sim->pairlist[target].num_pairs; i++){
                energy+= paire(target, sim->pairlist[target].pairs[i], intfce, topo, conf);
            }
        }
        else{
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = 0; i < target; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
#ifdef OMP
#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
#endif
            for (i = target + 1; i < topo->npart; i++) {
                energy+= paire(target, i, intfce, topo, conf);
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /*
     * Calculates the energy between particle "target" and the rest, skipping
     * particles from the given chain - particles have to be sorted in the chain!
     * Similar to mode 1 (one) but with the chain exception.
     */
    else if(mode == 2){
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = 0; i < target; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            } else {
                j++;
            }
        }
        j++;
        //#ifdef OMP
        //#pragma omp parallel for private(i) reduction (+:energy) schedule (dynamic)
        //#endif
        for (i = target + 1; i < topo->npart; i++) {
            if (i != topo->chainlist[chainnum][j]) {
                energy+= paire(target, i, intfce, topo, conf);
            } else {
                j++;
            }
        }
        /*add interaction with external potential*/
        if (topo->exter.exist)
            energy+= extere2(target,topo,conf);
    }
    /* Calculates energy between all pairs.
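       Each pair (i,j) with i<j is visited exactly once, and the external-field
       term extere2() is added once per particle, so the total is
       E = sum_{i<j} E_pair(i,j) + sum_i E_ext(i).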
       Returns the energy. */
    else if(mode == 0){
#ifdef OMP
#pragma omp parallel for private(i,j) reduction (+:energy) schedule (dynamic)
#endif
        for (i = 0; i < topo->npart - 1; i++) {
            for (j = i + 1; j < topo->npart; j++) {
                energy+= paire(i, j, intfce, topo, conf);
            }
            /*for every particle add the interaction with the external potential*/
            if (topo->exter.exist)
                energy+= extere2(i,topo,conf);
        }
        /*add the interaction of the last particle with the external potential*/
        if (topo->exter.exist)
            energy+= extere2(topo->npart-1,topo,conf);
    } else {
        fprintf(stderr, "ERROR: Wrong mode (%d) was given to calc_energy!", mode);
        return 0.0;
    }
    // DEBUG_SIM("Will return energy from calc_energy")
    //printf("energymove %f\n",energy);
    return energy;
}

/*..............................................................................*/

/* Checks for overlaps between particle "target" and the rest.
   Returns 1 if an overlap is detected, 0 otherwise. */
int forbidden(long npart, struct particles *particle, long target,
        struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long test;
    int overlap(struct particles, struct particles, struct vector,struct ia_param [MAXT][MAXT]);

    for (test=0; test<npart; test++) {
        if (test != target) {
            if ( overlap(particle[target], particle[test], box, ia_params) ) {
                return 1;
            }
        }
    }
    return 0;
}

/*..............................................................................*/

/* Checks for overlaps between all pairs of particles.
   Returns 1 if an overlap is detected, 0 otherwise. */
int checkall(long npart, struct particles *particle,
        struct vector box, struct ia_param ia_params[MAXT][MAXT])
{
    long i, j;
    int overlap(struct particles, struct particles, struct vector, struct ia_param [MAXT][MAXT]);

    for (i=0; i<npart-1; i++) {
        for (j=i+1; j<npart; j++) {
            if ( overlap(particle[i], particle[j], box, ia_params) ) {
                return 1;
            }
        }
    }
    return 0;
}

/*..............................................................................*/

/* Optimize the maximum displacement within the specified limits and reset
   the acceptance counters to zero. */
void optimizestep(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = (*x).mx * RATIO(*x);
    if ((*x).oldrmsd > 0) {
        if ( newrmsd < (*x).oldrmsd ) {
            if ( (*x).oldmx > 1 ) {
                (*x).mx /= 1.05;
                (*x).oldmx = 0.95;
            } else {
                (*x).mx *= 1.05;
                (*x).oldmx = 1.05;
            }
        } else {
            if ( (*x).oldmx > 1 ) {
                (*x).mx *= 1.05;
                (*x).oldmx = 1.05;
            } else {
                (*x).mx /= 1.05;
                (*x).oldmx = 0.95;
            }
        }
    }
    if (newrmsd > 0 )
        (*x).oldrmsd = newrmsd;
    else {
        (*x).oldrmsd = 0.0;
        (*x).mx /= 1.05;
        (*x).oldmx = 0.95;
    }
    if ( (*x).mx > hi ) (*x).mx = hi;
    if ( (*x).mx < lo ) (*x).mx = lo;

    (*x).acc = (*x).rej = 0;
}

/*..............................................................................*/

/* Optimize the maximum rotation within the specified limits and reset the
   acceptance counters to zero.
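   The maximum is adapted in +-1% steps per call (compare optimizestep() above,
   which uses +-5% steps for displacements) and is clamped to [lo,hi].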
   Rotation is given by the cosine of the angle: a larger rotation corresponds
   to a smaller cosine. */
void optimizerot(struct disp *x, double hi, double lo)
{
    double newrmsd;

    newrmsd = (*x).mx * RATIO((*x)) ;
    if ((*x).oldrmsd > 0) {
        if ( newrmsd > (*x).oldrmsd ) {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            } else {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            }
        } else {
            if ( (*x).oldmx > 1) {
                (*x).mx *= 1.01;
                (*x).oldmx *= 1.01;
            } else {
                (*x).mx *= 0.99;
                (*x).oldmx *= 0.99;
            }
        }
    }
    if (newrmsd > 0 )
        (*x).oldrmsd = newrmsd;
    else {
        (*x).oldrmsd = 0.0;
        (*x).mx *= 1.01;
        (*x).oldmx = 1.01;
    }
    if ( (*x).mx > hi ) (*x).mx = hi;
    if ( (*x).mx < lo ) (*x).mx = lo;

    (*x).acc = (*x).rej = 0;
}

/*................................................................................*/

/* Accumulate a value into the statistics and update the mean and rms values. */
void accumulate(struct stat *q, double x)
{
    (*q).sum += x;
    (*q).sum2 += x*x;
    (*q).samples++;
    (*q).mean = (*q).sum / (*q).samples;
    (*q).rms = sqrt(fabs((*q).sum2 / (*q).samples - (*q).sum * (*q).sum / (*q).samples / (*q).samples));
}

void printeqstat(struct disp *dat, double scale, int length)
{
    int i;

    for (i=0;i<length;i++) {
        if (RATIO(dat[i]) > 0)
            printf (" TYPE %d %.6lf / %.6lf\n", i, dat[i].mx/scale,RATIO(dat[i]));
    }
}

int memoryalloc(struct conf * conf)
{
    printf ("Allocating memory...\n");
    conf->particle = malloc( sizeof(struct particles)*MAXN);
    if(conf->particle == NULL){
        return 1;
    }
    return 0;
}

int memorydealloc(struct conf * conf, struct topo * topo, struct sim * sim)
{
    int dealloc_pairlist(struct topo * topo, struct sim * sim);

    printf ("Deallocating memory...\n");
    if (conf->particle != NULL)
        free(conf->particle);
    conf->particle = NULL;
    if (sim->clusterlist != NULL)
        free(sim->clusterlist);
    if (sim->clustersenergy != NULL)
        free(sim->clustersenergy);
    if(topo->switchlist){
        free(topo->switchlist);
    }
    if (sim->pairlist_update) {
        if(dealloc_pairlist(topo, sim)){
            return 1;
        }
    }
    return 0;
}

/*............................................................................*/

/**
 * nice malloc, which does the error checking for us
 */
void * xmalloc (size_t num){
    void *new = malloc (num);
    if (!new){
        fprintf(stderr, "Couldn't allocate any memory!\n");
        exit(1);
    }
    return new;
}

/*............................................................................*/

/* *********************** GEOMETRICAL FUNCTIONS **************************** */

/*.........................PATCHY SPHEROCYLINDERS INTERACTION....................*/
/*................................................................................*/

/* Calculate intersections of sc2 with a patch of sc1 and return them in
   intersections[] */
int psc_intersect(struct particles * part1, struct particles * part2,
        double halfl1, double halfl2, struct vector r_cm,
        double intersections[5], double rcut, struct ia_param * param,
        int which, int patchnum)
{
    int intrs;
    double a, b, c, d, e, x1, x2, rcut2;
    struct vector cm21, vec1, vec2, vec3, vec4;
    struct vector vec_crossproduct(struct vector, struct vector);
    struct vector vec_sub(struct vector, struct vector);
    struct vector vec_create(double, double, double);
    struct vector vec_scale(struct vector, double);
    struct vector vec_perpproject(struct vector*, struct vector*);
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);
    int find_intersect_plane(struct particles *, struct particles *, double,
            struct vector, struct vector, double, double, double *);
    int test_intrpatch(struct particles *, struct vector, double, double, double *,int);
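    /* Search strategy (summarizing steps 1a-1d below): candidate intersection
       parameters along the axis of sc2 are collected in intersections[] from
       (1a) the two half planes bounding the patch, (1b) the cylindrical part
       of the cutoff surface, (1c) the spherical caps, and (1d) the ends of sc2
       when they lie inside the patch wedge.  For the cylinder test, with
       vec1 = (-r_cm) x dir1 and vec2 = dir2 x dir1, the crossing parameters
       solve the quadratic a*x^2 + b*x + c = 0 with a = vec2.vec2,
       b = 2*vec1.vec2, c = vec1.vec1 - rcut^2, i.e.
       x = (-b +- sqrt(b^2 - 4ac)) / (2a). */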
intrs=0; rcut2=rcut*rcut; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/ /*1a- test intersection with half planes of patch and look how far they are from spherocylinder. If closer then C we got itersection*/ /* plane1 */ /* find intersections of part2 with plane by par1 and patchsides[0] */ intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections); // printf("plane1 %d\n", intrs); /* plane2 */ /* find intersections of part2 with plane by par1 and patchsides[1] */ intrs+=find_intersect_plane(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections); if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] <0) ) { fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n"); exit (1); } // printf("plane2 %d\n", intrs); /*1b- test intersection with cylinder - it is at distance C*/ if (intrs < 2 ) { cm21=vec_scale(r_cm,-1.0); vec1=vec_crossproduct(cm21,part1->dir); vec2=vec_crossproduct(part2->dir,part1->dir); a = DOT(vec2,vec2); b = 2*DOT(vec1,vec2); c = -rcut*rcut + DOT(vec1,vec1); d = b*b - 4*a*c; if ( d >= 0) { /*there is intersection with infinite cylinder */ x1 = (-b+sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/ if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { /* vectors from center os sc1 to intersection with infinite cylinder*/ vec1.x=part2->dir.x*x1-r_cm.x; vec1.y=part2->dir.y*x1-r_cm.y; vec1.z=part2->dir.z*x1-r_cm.z; e = DOT(part1->dir,vec1); if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/ else { intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } if ( d > 0 ){ x2 = (-b-sqrt(d))*0.5/a;/*parameter on line of SC2 determining intersection*/ if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec2.x = part2->dir.x*x2-r_cm.x; vec2.y = part2->dir.y*x2-r_cm.y; vec2.z = part2->dir.z*x2-r_cm.z; e = DOT(part1->dir,vec2); if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/ else { intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } } // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e); /*1c- test intersection with spheres at the end - it is at distace C*/ if (intrs < 2 ) { /*centers of spheres*/ /*relative to the CM of sc2*/ vec1.x = part1->dir.x*halfl1 - r_cm.x; vec1.y = part1->dir.y*halfl1 - r_cm.y; vec1.z = part1->dir.z*halfl1 - r_cm.z; vec2.x = -part1->dir.x*halfl1 - r_cm.x; vec2.y = -part1->dir.y*halfl1 - r_cm.y; vec2.z = -part1->dir.z*halfl1 - r_cm.z; /*sphere1*/ a = DOT(part2->dir,part2->dir); b = 2.0*DOT(vec1,part2->dir); c = DOT(vec1,vec1)-rcut*rcut; d = b*b-4*a*c; if (d >= 0) { /*if d<0 there are no intersections*/ x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec3.x = part2->dir.x*x1-r_cm.x; vec3.y = part2->dir.y*x1-r_cm.y; vec3.z = part2->dir.z*x1-r_cm.z; e = DOT(part1->dir,vec3); if ((e >= halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } if ( d > 0) { x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x2 >=halfl2) || 
(x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec4.x = part2->dir.x*x2 - r_cm.x; vec4.y = part2->dir.y*x2 - r_cm.y; vec4.z = part2->dir.z*x2 - r_cm.z; e = DOT(part1->dir,vec4); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } // printf ("sphere1 %d x1 %f x2 %f e %f\n", intrs, x1, x2, e); /*sphere2*/ a = DOT(part2->dir,part2->dir); b = 2.0*DOT(vec2,part2->dir); c = DOT(vec2,vec2)-rcut*rcut; d = b*b-4*a*c; if (d >= 0) { /*if d<0 there are no intersections*/ x1= (-b + sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec3.x = part2->dir.x*x1 - r_cm.x; vec3.y = part2->dir.y*x1 - r_cm.y; vec3.z = part2->dir.z*x1 - r_cm.z; e = DOT(part1->dir,vec3); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec3,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } if ( d > 0 ) { x2= (-b - sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec4.x = part2->dir.x*x2 - r_cm.x; vec4.y = part2->dir.y*x2 - r_cm.y; vec4.z = part2->dir.z*x2 - r_cm.z; e = DOT(part1->dir,vec4); if ((e >=halfl1) || (e <= -halfl1)) { /*if not intersection is inside sc1*/ intrs+=test_intrpatch(part1,vec4,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } // printf ("sphere2 %d\n", intrs); } /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/ if (intrs < 2 ) { /*whole spherocylinder is in or all out if intrs ==0*/ vec1.x = part2->dir.x*halfl2 - r_cm.x; vec1.y = part2->dir.y*halfl2 - r_cm.y; vec1.z = part2->dir.z*halfl2 - r_cm.z; /*vector from CM of sc1 to end of sc2*/ /*check is is inside sc1*/ a=DOT(vec1,part1->dir); vec3.x = vec1.x - part1->dir.x*a; vec3.y = vec1.y - part1->dir.y*a; vec3.z = vec1.z - part1->dir.z*a; b=DOT(vec3,vec3); d = fabs(a)-halfl1; if ( d <= 0) c = b; /*is inside cylindrical part*/ else c = d*d + b; /*is inside caps*/ /*c is distance squared from line or end to test if is inside sc*/ if (c < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum); if (intrs < 2 ) { vec2.x = -part2->dir.x*halfl2 - r_cm.x; vec2.y = -part2->dir.y*halfl2 - r_cm.y; vec2.z = -part2->dir.z*halfl2 - r_cm.z; /*check is is inside sc1*/ a=DOT(vec2,part1->dir); vec4.x = vec2.x - part1->dir.x*a; vec4.y = vec2.y - part1->dir.y*a; vec4.z = vec2.z - part1->dir.z*a; b=DOT(vec4,vec4); d = fabs(a) -halfl1; if (d <= 0) c = b; /*is inside cylindrical part*/ else c = d*d + b; /*is inside caps*/ /*c is distance squared from line or end to test if is inside sc*/ if (c < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum); } // printf ("ends %d\n", intrs); } return intrs; } /*................................................................................*/ /* Find if vector vec has angular intersection with patch of sc1 */ int test_intrpatch(struct particles * part1, struct vector vec, double cospatch, double ti, double intersections[5],int patchnum) { double a; int i, intrs; struct vector vec_perpproject(struct vector*, struct vector*); void normalise(struct vector *); intrs=0; /*test if we have intersection*/ /* do projection to patch 
plane*/ vec=vec_perpproject(&vec,&part1->dir); normalise(&vec); /* test angle distance from patch*/ a = DOT(part1->patchdir[patchnum],vec); if (a >= cospatch) { intrs=1; i=0; while (intersections[i] !=0) { if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/ i++; } if (intrs > 0) intersections[i]=ti; } return intrs; } /*................................................................................*/ /* Find intersections of SC and plane defined by vector w_vec.and returns number of them */ int find_intersect_plane(struct particles * part1, struct particles * part2, double halfl2, struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5]) { int i, intrs; double a, c, d, ti, disti; struct vector nplane, d_vec; void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); nplane=vec_crossproduct(part1->dir,w_vec); normalise(&nplane); normalise(&w_vec); a = DOT(nplane, part2->dir); if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/ else { ti = DOT(nplane,r_cm)/a; if ((ti > halfl2 ) || (ti < -halfl2)) intrs=0; /* there is no intersection plane sc is too short*/ else { d_vec.x = ti * part2->dir.x - r_cm.x; /*vector from intersection point to CM*/ d_vec.y = ti * part2->dir.y - r_cm.y; d_vec.z = ti * part2->dir.z - r_cm.z; c = DOT (d_vec, w_vec); if ( c * cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */ else { d = fabs(DOT (d_vec, part1->dir)) - halfl2; if (d <= 0) disti = c*c; /*is inside cylinder*/ else disti = d*d + c*c; /*is inside patch*/ if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */ else { intrs=1; i=0; while (intersections[i] !=0) { if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/ i++; } if (intrs > 0) { intersections[i]=ti; } } } } } return intrs; } /*CPSC................................................................................*/ /* Calculate intersections of sc2 with a patch of sc1 and return them in this works for cylindrical psc -CPSC */ int cpsc_intersect(struct particles * part1, struct particles * part2, double halfl1, double halfl2, struct vector r_cm, double intersections[5], double rcut, struct ia_param * param, int which, int patchnum) { int intrs; double a, b, c, d, e, x1, x2, rcut2; struct vector cm21, vec1, vec2, vec3, vec4; struct vector vec_crossproduct(struct vector, struct vector); struct vector vec_sub(struct vector, struct vector); struct vector vec_create(double, double, double); struct vector vec_scale(struct vector, double); struct vector vec_perpproject(struct vector*, struct vector*); struct quat quat_create(struct vector, double, double); void vec_rotate(struct vector *, struct quat); void normalise(struct vector *); int find_intersect_planec(struct particles *, struct particles *, double, struct vector, struct vector, double, double, double *); int test_intrpatch(struct particles *, struct vector, double, double, double *, int); intrs=0; rcut2=rcut*rcut; /*1- do intersections of spherocylinder2 with patch of spherocylinder1 at cut distance C*/ /*1a- test intersection with half planes of patch and look how far they are from spherocylinder. 
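(each bounding half plane is spanned by the axis of sc1 and one of its
patchsides vectors, as in psc_intersect() above)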
If closer then C we got itersection*/ /* plane1 */ /* find intersections of part2 with plane by par1 and part1->patchsides[0] */ intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[0+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections); // printf("plane1 %d\n", intrs); /* plane2 */ /* find intersections of part2 with plane by par1 and part1->patchsides[1] */ intrs+=find_intersect_planec(part1,part2,halfl2,r_cm,part1->patchsides[1+2*patchnum],rcut,param->pcanglsw[which+2*patchnum],intersections); if ( (intrs == 2 ) && (param->pcanglsw[which+2*patchnum] < 0) ) { fprintf (stderr, "ERROR: Patch is larger than 180 degrees and we are getting two segments - this hasnot been programed yet.\n\n"); exit (1); } // printf("plane2 %d\n", intrs); /*1b- test intersection with cylinder - it is at distance C*/ if (intrs < 2 ) { cm21=vec_scale(r_cm,-1.0); vec1=vec_crossproduct(cm21,part1->dir); vec2=vec_crossproduct(part2->dir,part1->dir); a = DOT(vec2,vec2); b = 2*DOT(vec1,vec2); c = -rcut*rcut + DOT(vec1,vec1); d = b*b - 4*a*c; if ( d >= 0) { /*there is intersection with infinite cylinder */ x1 = (-b+sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x1 >=halfl2) || (x1 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { /* vectors from center os sc1 to intersection with infinite cylinder*/ vec1.x=part2->dir.x*x1-r_cm.x; vec1.y=part2->dir.y*x1-r_cm.y; vec1.z=part2->dir.z*x1-r_cm.z; e = DOT(part1->dir,vec1); if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/ else { intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } if ( d > 0 ){ x2 = (-b-sqrt(d))*0.5/a; /*parameter on line of SC2 determining intersection*/ if ((x2 >=halfl2) || (x2 <= -halfl2)) intrs+=0; /*intersection is outside sc2*/ else { vec2.x = part2->dir.x*x2-r_cm.x; vec2.y = part2->dir.y*x2-r_cm.y; vec2.z = part2->dir.z*x2-r_cm.z; e = DOT(part1->dir,vec2); if ((e >=halfl1) || (e <= -halfl1)) intrs+=0; /*intersection is outside sc1*/ else { intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } } } } // printf ("cylinder %d x1 %f x2 %f e %f\n", intrs, x1, x2, e); /*1c- test intersection with plates at the end - it is at distace C and in wedge*/ if (intrs < 2 ) { a = DOT(part1->dir, part2->dir); if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/ else { /*plane cap1*/ vec1.x= r_cm.x + halfl1*part1->dir.x; vec1.y= r_cm.y + halfl1*part1->dir.y; vec1.z= r_cm.z + halfl1*part1->dir.z; x1 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/ if ((x1 > halfl2 ) || (x1 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/ else { vec2.x = x1*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point */ vec2.y = x1*part2->dir.y - vec1.y; vec2.z = x1*part2->dir.z - vec1.z; b = DOT (vec2, vec2); if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */ else { intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x1,intersections,patchnum); } } // printf ("plane cap1 %d %f\n", intrs, x1); /*plane cap2*/ vec1.x= r_cm.x - halfl1*part1->dir.x; vec1.y= r_cm.y - halfl1*part1->dir.y; vec1.z= r_cm.z - halfl1*part1->dir.z; x2 = DOT(part1->dir,vec1)/a; /*parameter on line of SC2 determining intersection*/ if ((x2 > halfl2 ) || (x2 < -halfl2)) intrs+=0; /* there is no intersection plane sc is too short*/ else { vec2.x = x2*part2->dir.x - vec1.x; /*vector from ENDPOINT to intersection point 
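(its squared length b is compared with rcut*rcut below; crossings farther than
the cutoff from the cap centre are discarded)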
*/ vec2.y = x2*part2->dir.y - vec1.y; vec2.z = x2*part2->dir.z - vec1.z; b = DOT (vec2, vec2); if (b > rcut*rcut) intrs+=0; /* the intersection is outside sc */ else { intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],x2,intersections,patchnum); } } // printf ("plane cap2 %d %f\n", intrs,x2); } } /*1d- if there is only one itersection shperocylinder ends within patch wedge set as second intersection end inside patch*/ if (intrs < 2 ) { /*whole spherocylinder is in or all out if intrs ==0*/ vec1.x = part2->dir.x*halfl2 - r_cm.x; vec1.y = part2->dir.y*halfl2 - r_cm.y; vec1.z = part2->dir.z*halfl2 - r_cm.z; /*vector from CM of sc1 to end of sc2*/ /*check is is inside sc1*/ a=DOT(vec1,part1->dir); vec3.x = vec1.x - part1->dir.x*a; vec3.y = vec1.y - part1->dir.y*a; vec3.z = vec1.z - part1->dir.z*a; b=DOT(vec3,vec3); d = fabs(a)-halfl1; if ( d <= 0) { /*is in cylindrical part*/ /*c is distance squared from line or end to test if is inside sc*/ if (b < rcut2) intrs+=test_intrpatch(part1,vec1,param->pcanglsw[which+2*patchnum],halfl2,intersections,patchnum); } if (intrs < 2 ) { vec2.x = -part2->dir.x*halfl2 - r_cm.x; vec2.y = -part2->dir.y*halfl2 - r_cm.y; vec2.z = -part2->dir.z*halfl2 - r_cm.z; /*check is is inside sc1*/ a=DOT(vec2,part1->dir); vec4.x = vec2.x - part1->dir.x*a; vec4.y = vec2.y - part1->dir.y*a; vec4.z = vec2.z - part1->dir.z*a; b=DOT(vec4,vec4); d = fabs(a) -halfl1; if (d <= 0) { /*c is distance squared from line or end to test if is inside sc*/ if (b < rcut2) intrs+=test_intrpatch(part1,vec2,param->pcanglsw[which+2*patchnum],-1.0*halfl2,intersections,patchnum); } } // printf ("ends %d\n", intrs); } return intrs; } /*CPSC................................................................................*/ /* Find intersections of plane defined by vector w_vec.and returns number of them - for cylindrical psc -CPSC */ int find_intersect_planec(struct particles * part1, struct particles * part2, double halfl, struct vector r_cm, struct vector w_vec, double rcut, double cospatch, double intersections[5]) { int i, intrs=0; double a, c, d, ti, disti; struct vector nplane, d_vec; void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); nplane=vec_crossproduct(part1->dir,w_vec); normalise(&nplane); normalise(&w_vec); a = DOT(nplane, part2->dir); if (a == 0.0) intrs=0; /* there is no intersection plane and sc are paralel*/ else { ti = DOT(nplane,r_cm)/a; if ((ti > halfl ) || (ti < -halfl)) intrs=0; /* there is no intersection plane sc is too short*/ else { d_vec.x = ti*part2->dir.x - r_cm.x; /*vector from intersection point to CM*/ d_vec.y = ti*part2->dir.y - r_cm.y; d_vec.z = ti*part2->dir.z - r_cm.z; c = DOT (d_vec, w_vec); if ( c *cospatch < 0) intrs=0; /* the intersection in plane is on other side of patch */ else { d = fabs(DOT (d_vec, part1->dir)) - halfl; if (d <= 0) { disti= c*c; /*is inside cylinder*/ if (disti > rcut*rcut) intrs=0; /* the intersection is outside sc */ else { intrs=1; i=0; while (intersections[i] !=0) { if (ti == intersections[i]) intrs=0; /* found intersection we already have -it is at boundary*/ i++; } if (intrs > 0) intersections[i]=ti; } } } } } return intrs; } /*..................................................................................*/ /* Find projection of cpsc on plane (0,0,1) including cutoff and return vector to its begining and end and cm */ int psc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir, BOOL* positive, double * cutdist, struct vector 
*partbeg, struct vector *partend) { struct vector vec1; double k,x1,x2,y1,y2,a,b,c,e,d; void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result); void normalise(struct vector*); if (( (*positive)&& (projectdir->z > 0) ) || ( (!(*positive))&& (projectdir->z < 0) )) return 0; if ( fabs(partbeg->z) > (*cutdist) ) return 0; /* we might have interacting segment*/ x2 = 0.0; y2 = 0.0; /*begining point*/ /*if begining projected along particle direction is within cutoff */ if (fabs(partdir->z) > ZEROTOL2) { projectinz(partbeg,partdir,pbeg); a=0; } else { /*we need some starting point*/ vec1.x = 2.0*partbeg->x - partend->x; vec1.y = 2.0*partbeg->y - partend->y; vec1.z = 2.0*partbeg->z - partend->z; projectinz(&vec1,projectdir,pbeg); a=1; } if (partdir->z != 0) { b = fabs(partbeg->z / partdir->z); } else { b = (*cutdist)+1.0; } if ( (b > (*cutdist)) || (a==1)) { /*else beginig is at sphere, find intersections with sphere of cutoff radius*/ if ( fabs(projectdir->z) > ZEROTOL2) { projectinz(partbeg,projectdir,pend); } else { pend->x = pbeg->x + projectdir->x; pend->y = pbeg->y + projectdir->y; } if (pend->y == pbeg->y) { y1=pbeg->y; y2=pbeg->y; a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) ); x1 = partbeg->x + a; x2 = partbeg->x - a; if (pend->x > pbeg->x) {/*select the right intersection*/ pbeg->x = x2; x2 = x1; } else { pbeg->x = x1; } pbeg->y = y1; } else { k = (pend->x - pbeg->x)/ (pend->y - pbeg->y); a = k*k +1; b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x; c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x); e = b*b-a*c; if (e < 0) { return 0; /*tehre might be no intersection with sphere*/ } d = sqrt(e); if (pend->y > pbeg->y) {/*select the right intersection*/ y1 = (b - d ) /a; y2 = (b + d ) /a; } else { y1 = (b + d ) /a; y2 = (b - d ) /a; } x1 = k * (y1 - pbeg->y) + pbeg->x; x2 = k * (y2 - pbeg->y) + pbeg->x; pbeg->x = x1; pbeg->y = y1; pbeg->z = 0.0; } } //printf("pscwall beg %f %f \n",pbeg->x,pbeg->y); /*end point*/ a = -(*cutdist) * projectdir->z; /*z coordinate of point where projection is in cut distance*/ //printf("sphere end %f %f ",a,partend->z); if ( ((partend->z < a)&&(*positive)) || ((a < partend->z)&&(!(*positive))) ){ /*end is within cut off - second sphere*/ /*if this is the case vec1 is end of pherocylinder and pend is its projection*/ if (projectdir->z != 0) { projectinz(partend,projectdir,pend); } else { pend->x = pbeg->x + projectdir->x; pend->y = pbeg->y + projectdir->y; } if (pend->y == pbeg->y) { y1=pend->y; y2=pend->y; a=sqrt( (*cutdist)*(*cutdist) - partend->z*partend->z - (pend->y-partend->y)*(pend->y-partend->y) ); x1 = partend->x + a; x2 = partend->x - a; if (pbeg->x > pend->x) {/*select the right intersection*/ pend->x = x2; } else { pend->x = x1; } pend->y = y1; } else { k = (pbeg->x - pend->x)/ (pbeg->y - pend->y); a = k*k +1; b = partend->y + k*k*pend->y - k*pend->x + k*partend->x; c = partend->y*partend->y + partend->z*partend->z - (*cutdist)*(*cutdist) + (k*pend->y - pend->x + partend->x)*(k*pend->y - pend->x + partend->x); e = b*b-a*c; if (e < 0) { return 0; /*there might be no intersection with sphere*/ } d = sqrt(e); if (pbeg->y > pend->y) {/*select the right intersection*/ y1 = (b - d ) /a; y2 = (b + d ) /a; } else { y1 = (b + d ) /a; y2 = (b - d ) /a; } x1 = k * (y1 - pend->y) + pend->x; x2 = k * (y2 - pend->y) + pend->x; pend->x = x1; pend->y = y1; pend->z = 0.0; } } else 
{
    if ( ((partbeg->z < a)&&(*positive)) || ((a < partbeg->z)&&(!(*positive))) ) {
        /*end is at cutoff going through cylindrical part*/
        //printf("cylinder ");
        b = (a - partbeg->z)/ partdir->z;
        vec1.x = partbeg->x + b * partdir->x;
        vec1.y = partbeg->y + b * partdir->y;
        vec1.z = a;
        projectinz(&vec1,projectdir,pend);
    } else {
        /* the projected end is also within the same sphere as the beginning - no contribution from cylinder*/
        if (x2 == 0.0 ) {
            //printf("sphere beg ");
            if (projectdir->z != 0) {
                projectinz(partbeg,projectdir,pend);
            } else {
                pend->x = pbeg->x + projectdir->x;
                pend->y = pbeg->y + projectdir->y;
            }
            if (pend->y == pbeg->y) {
                y1=pbeg->y;
                y2=pbeg->y;
                a=sqrt( (*cutdist)*(*cutdist) - partbeg->z*partbeg->z - (pbeg->y-partbeg->y)*(pbeg->y-partbeg->y) );
                x1 = partbeg->x + a;
                x2 = partbeg->x - a;
                if (pend->x > pbeg->x) { /*select the right intersection*/
                    pend->x = x1;
                } else {
                    pend->x = x2;
                }
                pend->y = y1;
            } else {
                k = (pend->x - pbeg->x)/ (pend->y - pbeg->y);
                a = k*k +1;
                b = partbeg->y + k*k*pbeg->y - k*pbeg->x + k*partbeg->x;
                c = partbeg->y*partbeg->y + partbeg->z*partbeg->z - (*cutdist)*(*cutdist) + (k*pbeg->y - pbeg->x + partbeg->x)*(k*pbeg->y - pbeg->x + partbeg->x);
                e = b*b-a*c;
                if (e < 0) {
                    return 0; /*there might be no intersection with the sphere*/
                }
                d = sqrt(e);
                if (pend->y > pbeg->y) { /*select the right intersection*/
                    y1 = (b - d ) /a;
                    y2 = (b + d ) /a;
                } else {
                    y1 = (b + d ) /a;
                    y2 = (b - d ) /a;
                }
                x1 = k * (y1 - pbeg->y) + pbeg->x;
                x2 = k * (y2 - pbeg->y) + pbeg->x;
                pend->x = x1;
                pend->y = y1;
                pend->z = 0.0;
            }
        } else {
            pend->x = x2;
            pend->y = y2;
            pend->z = 0.0;
        }
        return 2; /*line end is on the sphere of the particle beginning = no cylindrical cutoff*/
    }
}
return 1;
}

int cpsc_wall(struct vector* pbeg, struct vector* pend, struct vector* projectdir, struct vector* partdir,
        double* halfl, BOOL* orientin, BOOL* positive, double* rcmz, double * cutdist,
        struct vector *partbeg, struct vector *partend)
{
    struct vector vec1;
    double a;
    void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);

    if (( (*positive)&& (projectdir->z >= 0) ) || ( (!(*positive))&& (projectdir->z <= 0) ))
        return 0; /*if the projected closer point is beyond the cutoff there is no interaction*/
    /*project beginning of spherocylinder*/
    vec1.x = partbeg->x;
    vec1.y = partbeg->y;
    vec1.z = partbeg->z;
    if (-vec1.z/projectdir->z < (*cutdist) ) {
        projectinz(&vec1,projectdir,pbeg);
    } else {
        return 0;
    }
    /* we have an interacting segment*/
    if (-partend->z/projectdir->z < (*cutdist) ) {
        /*whole segment interacts*/
        vec1.z = partend->z;
    } else {
        vec1.z = -(*cutdist)*projectdir->z;
    }
    if (partdir->z != 0.0)
        a = (vec1.z - (*rcmz)) / partdir->z;
    else {
        if (*orientin)
            a = -(*halfl);
        else
            a = (*halfl);
    }
    vec1.x = partdir->x * a;
    vec1.y = partdir->y * a;
    projectinz(&vec1,projectdir,pend);

    return 1;
}

int cutprojectatwall(struct vector* pextr1, struct vector* pextr2, struct vector* pextr3, struct vector* pextr4,
        struct vector* projectdir, struct vector* partdir, double * cutdist,
        struct vector *partbeg, struct vector *partend, struct vector *pend, double *cuttoproject, BOOL* orientin)
{
    double y1,y2,O2z,det,a,b,dirydirz,dir2x,dir2y,dir2z,dirzldiry;
    void projectinz( struct vector* vec1, struct vector * projectdir, struct vector* result);

    dirydirz = partdir->y * partdir->z;
    dir2x = partdir->x * partdir->x;
    dir2y = partdir->y * partdir->y;
    dir2z = partdir->z * partdir->z;
    a = 1/(dir2x+dir2y);

    if (partdir->x != 0) {
        O2z = partbeg->z * partbeg->z;
        b=dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
        if (b < 0 ) {
            /*no cutoff from cylindrical part*/
            return 0;
        }
        det = sqrt(b);
        y1 = partbeg->y + (dirydirz*partbeg->z + det )*a;
        y2 = partbeg->y + (dirydirz*partbeg->z - det )*a;
        if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
            pextr1->y = y1;
            pextr2->y = y2;
        } else {
            pextr1->y = y2;
            pextr2->y = y1;
        }
        pextr1->x = partbeg->x + (partbeg->z*partdir->z - (pextr1->y - partbeg->y)*partdir->y) / partdir->x;
        pextr2->x = partbeg->x + (partbeg->z*partdir->z - (pextr2->y - partbeg->y)*partdir->y) / partdir->x;

        O2z = partend->z * partend->z;
        b= dir2y*dir2z*O2z - (dir2x+dir2y) * (O2z*(dir2x+dir2z)- (*cutdist)*(*cutdist)*dir2x);
        if (b >= 0) {
            /*we have intersections from the end*/
            det = sqrt(b);
            y1 = partend->y + (dirydirz * partend->z + det )*a;
            y2 = partend->y + (dirydirz * partend->z - det )*a;
            //printf("det %f y1 %f y2 %f \n", det,y1,y2);
            if (( (partdir->x > 0)&&(!(*orientin)) ) || ( (partdir->x < 0)&&(*orientin) )) {
                pextr3->y = y1;
                pextr4->y = y2;
            } else {
                pextr3->y = y2;
                pextr4->y = y1;
            }
            pextr3->x = partend->x + (partend->z*partdir->z - (pextr3->y - partend->y)*partdir->y) / partdir->x;
            pextr4->x = partend->x + (partend->z*partdir->z - (pextr4->y - partend->y)*partdir->y) / partdir->x;
        } else {
            /*no intersection at the end; the cutoff intersects the plane at the perpendicular
              projection of the line segment, so we have to use that point */
            if (partdir->z == 0) {
                fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
                exit (1);
            } else {
                a = ((*cuttoproject) - partbeg->z)/ partdir->z;
                //if ( projectdir->y * partdir->x < 0 )
                pextr3->x = partbeg->x + a * partdir->x;
                pextr3->y = partbeg->y + a * partdir->y;
                pextr3->z = (*cuttoproject);
                //printf("before proj %f %f dir %f %f %f ",pextr3->x,pextr3->y,projectdir->x,projectdir->y,projectdir->z);
                projectinz(pextr3,projectdir,pextr4);
                pextr3->x = pextr4->x;
                pextr3->y = pextr4->y;
                pextr3->z = 0.0;
                //printf("after proj %f %f \n",pextr3->x,pextr3->y);
                return 2;
            }
        }
    } else {
        if (partdir->y != 0) {
            dirzldiry = partdir->z/partdir->y;
            y1 = partbeg->y + partbeg->z * dirzldiry;
            det = sqrt( (*cutdist)*(*cutdist) - partbeg->z * partbeg->z * (1+dirzldiry*dirzldiry) );
            if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
                pextr1->x = partbeg->x + det;
                pextr2->x = partbeg->x - det;
            } else {
                pextr1->x = partbeg->x - det;
                pextr2->x = partbeg->x + det;
            }
            pextr1->y = y1;
            pextr2->y = y1;

            y1 = partend->y + partend->z * dirzldiry;
            b = (*cutdist)*(*cutdist) - partend->z * partend->z * (1+dirzldiry*dirzldiry);
            if (b >= 0) {
                /*we have intersections from the end*/
                det = sqrt(b);
                if (( (partdir->y > 0)&&(!(*orientin)) ) || ( (partdir->y < 0)&&(*orientin) )) {
                    pextr3->x = partend->x + det;
                    pextr4->x = partend->x - det;
                } else {
                    pextr3->x = partend->x - det;
                    pextr4->x = partend->x + det;
                }
                pextr3->y = y1;
                pextr4->y = y1;
            } else {
                /*no intersection at the end; the cutoff intersects the plane at the perpendicular
                  projection of the line segment, so we have to use that point */
                if (partdir->z == 0) {
                    fprintf (stderr, "\nERROR: Something went wrong in calculation of projection.\n\n");
                    exit (1);
                } else {
                    a = ((*cutdist) - partbeg->z)/ partdir->z;
                    y1 = a * partdir->y + partbeg->y;
                    if ( projectdir->x * partdir->y > 0 ) {
                        pextr3->x = a * partdir->x + partbeg->x;
                        pextr3->y = y1;
                        pextr4->x = pend->x;
                        pextr4->y = pend->y;
                    } else {
                        pextr3->x = pend->x;
                        pextr3->y = pend->y;
                        pextr4->x = a * partdir->x + partbeg->x;
                        pextr4->y = y1;
                    }
                }
            }
        } else {
            return 0; /* if perpendicular to the plane we don't have any intersections*/
        }
    }

    return 1;
}

/*project a point in project direction to z plane z=0*/
void projectinz(struct vector* vec1, struct vector* projectdir,struct vector * projection)
{
    projection->x = vec1->x - vec1->z * projectdir->x/projectdir->z;
    projection->y = vec1->y - vec1->z * projectdir->y/projectdir->z;
    projection->z = 0;
}

/*calculates area defined by four points in z=0 plane */
double areafourpoints(struct vector * pbeg, struct vector * pend, struct vector * pbeg1, struct vector * pend1 )
{
    double area =0.0;
    struct vector vec1,vec2;

    /*area of the quadrilateral from two triangle cross products: |(pbeg1-pbeg)x(pend-pbeg)|/2 */
    vec1.x = pbeg1->x - pbeg->x;
    vec1.y = pbeg1->y - pbeg->y;
    vec2.x = pend->x - pbeg->x;
    vec2.y = pend->y - pbeg->y;
    //printf("a: %f %f %f %f \n",vec1.x,vec2.y,vec1.y,vec2.x);
    area += fabs(vec1.x*vec2.y - vec1.y*vec2.x)*0.5;
    /* + |(pend-pend1)x(pbeg1-pend1)|/2*/
    vec1.x = pend->x - pend1->x;
    vec1.y = pend->y - pend1->y;
    vec2.x = pbeg1->x - pend1->x;
    vec2.y = pbeg1->y - pend1->y;
    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;

    return area;
}

/*calculates area defined by up to eight points in z=0 plane */
double areaeightpoints(struct vector * p1, struct vector * p2, struct vector * p3, struct vector * p4,
        struct vector * p5, struct vector * p6,struct vector * p7, struct vector * p8)
{
    double area =0.0;
    struct vector vec1,vec2;

    /*fan triangulation from p1: accumulate the triangle areas |(pN-p1)x(pN+1-pN)|/2
      for every supplied point; p4..p8 may be NULL */
    vec1.x = p2->x - p1->x;
    vec1.y = p2->y - p1->y;
    vec2.x = p3->x - p2->x;
    vec2.y = p3->y - p2->y;
    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
    //printf("3");

    if (p4 != NULL) {
        vec1.x = p3->x - p1->x;
        vec1.y = p3->y - p1->y;
        vec2.x = p4->x - p3->x;
        vec2.y = p4->y - p3->y;
        area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
        //printf("4");
        if (p5 != NULL) {
            vec1.x = p4->x - p1->x;
            vec1.y = p4->y - p1->y;
            vec2.x = p5->x - p4->x;
            vec2.y = p5->y - p4->y;
            area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
            //printf("5");
            if (p6 != NULL) {
                vec1.x = p5->x - p1->x;
                vec1.y = p5->y - p1->y;
                vec2.x = p6->x - p5->x;
                vec2.y = p6->y - p5->y;
                area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                //printf("6");
                if (p7 != NULL) {
                    vec1.x = p6->x - p1->x;
                    vec1.y = p6->y - p1->y;
                    vec2.x = p7->x - p6->x;
                    vec2.y = p7->y - p6->y;
                    area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                    //printf("7");
                    if (p8 != NULL) {
                        vec1.x = p7->x - p1->x;
                        vec1.y = p7->y - p1->y;
                        vec2.x = p8->x - p7->x;
                        vec2.y = p8->y - p7->y;
                        area += fabs(vec1.x*vec2.y -vec1.y*vec2.x)*0.5;
                        //printf("8");
                    }
                }
            }
        }
    }

    return area;
}

/*..............................................................................*/
/*........................INPUT OUTPUT..........................................*/
/*..............................................................................*/
/*..............................................................................*/

/**
 * convert string num into two integers
 */
void readii(char * num, int value[2]){
    char *end, *num2;
    void trim (char *);

    value[0] = strtol(num, &num2, 10);
    trim(num2);
    if ((int)strlen(num2) > 0)
        value[1] = strtol(num2, &end, 10);
    else {
        value[1] =0;
        return;
    }
    if(*end){
        fprintf(stderr, "Could not convert %s into two integers\n", num);
        exit(1);
    }

    return;
}

/**
 * convert string num into integer
 */
int readi(char * num){
    char *end;
    int i = strtol(num, &end, 10);
    if(*end){
        fprintf(stderr, "Could not convert %s into integer\n", num);
        exit(1);
    }
    return (int) i;
}

/**
 * convert string num into long
 */
long readl(char * num){
    char *end;
    long i = strtol(num, &end, 10);
    if(*end){
        fprintf(stderr, "Could not convert %s into long\n", num);
        exit(1);
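        /* A minimal usage sketch for this family of conversion helpers
         * (hypothetical inputs; readd follows below):
         *   int pair[2]; readii("3 5", pair);  ->  pair[0]==3, pair[1]==5
         *   int    i = readi("42");
         *   long   l = readl("100000");
         *   double d = readd("0.25");
         * Each one converts with strtol/strtod and then requires that the end
         * pointer consumed the whole (trimmed) string, otherwise it exits. */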
} return i; } /** * convert string num into double */ double readd(char * num){ char *end; double i = strtod(num, &end); if(*end){ fprintf(stderr, "Could not convert %s into double\n", num); exit(1); } return i; } /* Reads the run parameters from the external file "options". See the end of the code for a template. All comments starting with '#' are stripped out. The options are summarised on standard output and checked for validity of range. */ void read_options(struct sim* sim,char filename[30]) { int i; int num_options = -1; double transmx, rotmx, chainmmx, chainrmx; double angle, chain_angle; char *id, *value, *tokLine, *line; FILE *infile; void strip_comment (char *); void trim (char *); void readii(char * num, int value[2]); double readd(char *); long readl(char *); int readi(char *); /* for new options add before the last line */ Option options[] = { {"write_cluster", Long, FALSE, &sim->write_cluster}, {"adjust", Long, FALSE, &sim->adjust}, {"movie", Long, FALSE, &sim->movie}, {"nequil", Long, FALSE, &sim->nequil}, {"nsweeps", Long, FALSE, &sim->nsweeps}, {"nrepchange", Long, FALSE, &sim->nrepchange}, {"paramfrq", Long, FALSE, &sim->paramfrq}, {"report", Long, FALSE, &sim->report}, {"seed", Long, FALSE, &seed}, {"pairlist_update", Int, FALSE, &sim->pairlist_update}, {"ptype", Int, FALSE, &sim->ptype}, {"wlm", Int2, FALSE, &sim->wlm}, {"wlmtype", Int, FALSE, &sim->wl.wlmtype}, {"press", Double, FALSE, &sim->press}, {"paralpress", Double, FALSE, &sim->paralpress}, {"edge_mx", Double, FALSE, &sim->edge.mx}, {"shave", Double, FALSE, &sim->shave}, {"chainprob", Double, FALSE, &sim->chainprob}, {"switchprob", Double, FALSE, &sim->switchprob}, {"temper", Double, FALSE, &sim->temper}, {"paraltemper", Double, FALSE, &sim->paraltemper}, {"transmx", Double, FALSE, &transmx}, {"rotmx", Double, FALSE, &rotmx}, {"chainmmx", Double, FALSE, &chainmmx}, {"chainrmx", Double, FALSE, &chainrmx}, {"last", Int, FALSE, NULL} }; while(options[++num_options].var != NULL) ; /*--- 1. 
Read in values ---*/
    size_t line_size = (STRLEN + 1) * sizeof(char);
    line = (char *) malloc(line_size);

    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf (stderr, "\nERROR: Could not open options file.\n\n");
        exit (1);
    }

    while(getline(&line, &line_size, infile) != -1){
        // strip comments
        strip_comment(line);
        trim(line);
        if(strlen(line) == 0){
            continue;
        }
        // tokenize
        tokLine = line;
        id = strtok(tokLine, "=");
        if(id == NULL){
            fprintf(stderr, "error parsing configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(id);
        tokLine = NULL;
        value = strtok(tokLine, "=");
        if(value == NULL){
            fprintf(stderr, "error parsing configuration line (%s)", line);
            free(line);
            exit(1);
        }
        trim(value);
        //printf("id: %s; value: %s\n", id, value);
        for(i = 0; i < num_options; i++){
            if(strcmp(id, options[i].id) == 0){
                if(options[i].type == Int2){
                    readii(value,*((int (*)[2]) options[i].var));
                    options[i].set = TRUE;
                    break;
                }
                if(options[i].type == Int){
                    *((int *) options[i].var) = readi(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Long){
                    *((long *) options[i].var) = readl(value);
                    options[i].set = TRUE;
                    break;
                }
                else if(options[i].type == Double){
                    *((double *) options[i].var) = readd(value);
                    options[i].set = TRUE;
                    break;
                }
                else {
                    fprintf(stderr, "Could not determine type of %s!\n", id);
                    free(line);
                    exit(1);
                }
            }
        }
        if(i == num_options){
            fprintf(stderr, "Unknown identifier %s!\nWill proceed.\n", id);
        }
    }
    fclose (infile);
    free(line);

    /* Check whether all options have been read in */
    for(i = 0; i < num_options; i++){
        if(!options[i].set){
            fprintf(stderr, "option '%s' is not set!\n", options[i].id);
            exit(1);
        }
    }

    /*--- 2. Summarize results on standard output ---*/
    /* Density of close-packed spherocylinders */
    // rho_cp = 2.0/(sqrt(2.0) + *length * sqrt(3.0));

    printf (" Pressure coupling type: %d\n", sim->ptype);
    printf (" Pressure: %.8lf\n", sim->press);
    printf (" Replica exchange pressure: %.8lf\n", sim->paralpress);
    printf (" Average volume change attempts per sweep: %.8lf\n", sim->shave);
    printf (" Equilibration sweeps: %ld\n", sim->nequil);
    printf (" Sweeps between step size adjustments: %ld\n", sim->adjust);
    printf (" Production sweeps: %ld\n", sim->nsweeps);
    printf (" Sweeps between statistics samples: %ld\n", sim->paramfrq);
    printf (" Sweeps between statistics reports: %ld\n", sim->report);
    printf (" Average chain move attempts per sweep: %.8lf\n", sim->chainprob);
    printf (" Initial maximum displacement: %.8lf\n", transmx);
    printf (" Initial maximum angular change (degrees): %.8lf\n", rotmx);
    printf (" Initial maximum box edge change: %.8lf\n", sim->edge.mx);
    printf (" Initial maximum chain displacement: %.8lf\n", chainmmx);
    printf (" Initial maximum chain angular change (degrees): %.8lf\n", chainrmx);
    printf (" Temperature in kT/e: %.8lf\n", sim->temper);
    printf (" Parallel tempering temperature in kT/e: %.8lf\n", sim->paraltemper);
    printf (" Sweeps between replica exchange: %ld\n", sim->nrepchange);
    printf (" Wang-Landau method: %d %d\n", sim->wlm[0],sim->wlm[1]);
    printf (" Calculate the Wang-Landau method for atom type: %d\n", sim->wl.wlmtype);
    printf (" Average type switch attempts per sweep: %.8lf\n", sim->switchprob);
    printf (" Number of sweeps per pairlist update: %d\n", sim->pairlist_update);
    printf (" Random number seed: %ld\n", seed);
    printf (" Number of sweeps per writing out cluster info: %ld\n", sim->write_cluster);

    if (sim->movie > 0) {
        printf (" Sweeps between movie frames: %ld\n", sim->movie);
    } else {
        printf (" No movie\n");
    }
    printf ("\n");
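    /* For reference, the options file parsed by the loop above is a plain list
     * of "key = value" lines, with '#' starting a comment. A minimal sketch
     * (keys taken from the Option table above; the values are arbitrary
     * example numbers, not recommendations):
     *   # example options file
     *   nsweeps = 100000
     *   nequil  = 10000
     *   temper  = 1.0
     *   wlm     = 0 0     # Int2 options take two integers
     */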
printf(" A pairlist will be generated every %d steps. This is a greedy" " algorithm; make sure you don't have big chains etc.!\n", sim->pairlist_update); } /*--- 3. Validity checks ---*/ if (rotmx < 0.0 || rotmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change must be in range 0 to 180.\n\n"); exit (1); } if (chainrmx < 0.0 || chainrmx > 180) { fprintf (stderr, "ERROR: Maximum orientation change for chains must be in range 0 to 180.\n\n"); exit (1); } if ( (sim->ptype <0) || (sim->ptype>3) ) { fprintf (stderr, "ERROR: Unknown pressure coupling %d. Program only knows: 0 - anisotropic coupling, \ 1 - isotropic coupling, 2 - isotropic in xy z=const, 3 - isotropic xy V=const.\n\n",sim->ptype); exit (1); } if ( (sim->wlm[0] <0) || (sim->wlm[0] > 7) || (sim->wlm[1] <0) || (sim->wlm[1] > 7) ) { fprintf (stderr, "ERROR: Unknown Wang-Landau method %d %d. Program only knows: 0 - none, \ 1 - z-direction od 1st particle, 2 - pore in membrane, 3 - zorientation of 0th particle,\ 4 - distance of fist two particles, 5 - pore around z-axis above CM,\ 6 - pore around z-axis above 0th particle, 7 - number of particles in contact \n\n",sim->wlm[0],sim->wlm[1]); exit (1); } if ( (sim->wlm[0] == 0) && (sim->wlm[1] > 0) ) { fprintf (stderr, "ERROR: Wang-Landau method has to be set for first order parameter and then for second order parameter\n\n"); exit (1); } if ( (sim->wlm[0] == 2) || (sim->wlm[0] == 5) || (sim->wlm[0] == 6) ) { if(sim->wl.wlmtype < 1){ fprintf (stderr, "ERROR: Atom type for the Wang-Landau Method (%d) was false defined.\n\n",sim->wl.wlmtype); exit (1); } if ( (sim->wlm[1] == 2) || (sim->wlm[1] == 5) || (sim->wlm[1] == 6) ) { fprintf (stderr, "ERROR: Simulaneous use of two pore order parameters has not been implemented yet.\n\n"); exit (1); } } /* we store maximum rotation as half angle - useful for quaterions*/ angle = rotmx / 180.0 * PIH *0.5; rotmx = cos((rotmx)/180.0*PIH); chain_angle = chainrmx / 180.0 * PIH; chainrmx = cos((chainrmx)/180.0*PIH); sim->edge.mx *= 2.0; /* The full range is -maxl to +maxl, i.e. spanning 2*maxl */ transmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ chainmmx *= 2.0; /* The full range is -maxr to +maxr, i.e. spanning 2*maxr */ for (i=0;i<MAXT;i++) { sim->trans[i].mx = transmx; sim->rot[i].mx = rotmx; sim->rot[i].angle = angle; } for (i=0;i<MAXMT;i++) { sim->chainm[i].mx = chainmmx; sim->chainr[i].mx = chainrmx; sim->chainr[i].angle = chain_angle; } //parallel tempering #ifdef MPI if ( (sim->temper != sim->paraltemper) && (sim->mpinprocs <2) ) { printf("ERROR: Paralllel tempering at single core does not work.\n\n"); exit(1); } sim->dtemp = (sim->paraltemper - sim->temper )/(sim->mpinprocs-1); sim->temper += sim->dtemp * sim->mpirank; if ( (sim->press != sim->paralpress) && (sim->mpinprocs <2) ) { printf("ERROR: Pressure replica exchange at single core does not work.\n\n"); exit(1); } sim->dpress = (sim->paralpress - sim->press )/(sim->mpinprocs-1); sim->press += sim->dpress * sim->mpirank; seed += sim->mpirank; sim->mpiexch.mx = sim->dtemp; sim->mpiexch.angle = sim->dpress; #endif } /*..............................................................................*/ /* Used by read_options to read a long integer with error checking. 
NOT USED ANYMORE */ long read_long(FILE *infile, char *error) { char *gotline; char line[500]; int fields; long value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%ld", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /* Used by read_options to read a long integer with error checking. NOT USED ANYMORE */ int read_int(FILE *infile, char *error) { char *gotline; char line[500]; int fields; int value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%d", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /*..............................................................................*/ /* Used by read_options to read a double precision with error checking. NOT USED ANYMORE */ double read_double(FILE *infile, char *error) { char *gotline; char line[500]; int fields; double value; gotline = fgets(line, sizeof(line), infile); fields = sscanf(line, "%le", &value); if (gotline == NULL || fields != 1) { fprintf (stdout, "\nERROR reading %s from options file.\n\n", error); exit (1); } return value; } /*..............................................................................*/ /**************************************************************************** * CONFIG INITIALIZATION *****************************************************************************/ /* Reads in the initial configuration from the file "config.init". Each line contains the three components of the position vector and three components of the direction vector and three components of patch direction for a spherocylinder. The direction vector is normalised after being read in. The configuration is checked for particle overlaps. 
*/ void init_config(struct topo * topo, struct conf * conf, struct sim * sim, char filename[30]) { int err,fields,tmp_type; long i,j,current,first; FILE * infile; char * line, line2[STRLEN]; size_t line_size = (STRLEN + 1) * sizeof(char); line = (char *) malloc(line_size); struct particles chorig[MAXCHL]; int overlap(struct particles, struct particles, struct vector, struct ia_param [MAXT][MAXT]); void normalise(struct vector *); void ortogonalise(struct vector *, struct vector); void usepbc(struct vector *, struct vector); double anint(double); void strip_comment (char *); void trim (char *); void aftercommand(char *, char *, char); double maxlength = 0; for(i = 0; i < MAXT; i++){ if(maxlength < topo->ia_params[i][i].len[0]) maxlength = topo->ia_params[i][i].len[0]; } infile = fopen(filename, "r"); if (infile == NULL) { fprintf (stderr, "\nERROR: Could not open config.init file.\n\n"); exit (1); } if(getline(&line, &line_size, infile) == -1){ fprintf (stderr, "ERROR: Could not read box size.\n\n"); exit (1); } strip_comment(line); trim(line); if (sscanf(line, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) { if(getline(&line, &line_size, infile) == -1){ fprintf (stderr, "ERROR: Could not read box size.\n\n"); exit (1); } aftercommand(line2,line,BOXSEP); strip_comment(line2); trim(line2); if (sscanf(line2, "%le %le %le", &(conf->box.x), &(conf->box.y), &(conf->box.z)) != 3) { fprintf (stderr, "ERROR: Could not read box size.\n\n"); exit (1); } } if (conf->box.x < maxlength * 2.0 + 2.0) { printf ("WARNING: x box length is less than two spherocylinders long.\n\n"); } if (conf->box.y < maxlength * 2.0 + 2.0) { printf ("WARNING: y box length is less than two spherocylinders long.\n\n"); } if (conf->box.z < maxlength * 2.0 + 2.0) { printf ("WARNING: z box length is less than two spherocylinders long.\n\n"); } DEBUG_INIT("Position of the particle"); for (i=0; i < topo->npart; i++) { if(getline(&line, &line_size, infile) == -1){ break; } strip_comment(line); trim(line); fields = sscanf(line, "%le %le %le %le %le %le %le %le %le %d", &conf->particle[i].pos.x, &conf->particle[i].pos.y, &conf->particle[i].pos.z, &conf->particle[i].dir.x, &conf->particle[i].dir.y, &conf->particle[i].dir.z, &conf->particle[i].patchdir[0].x, &conf->particle[i].patchdir[0].y, &conf->particle[i].patchdir[0].z, &conf->particle[i].switched); conf->particle[i].patchdir[1].x = conf->particle[i].patchdir[1].y = conf->particle[i].patchdir[1].z =0; conf->particle[i].chdir[0].x = conf->particle[i].chdir[0].y = conf->particle[i].chdir[0].z =0; conf->particle[i].chdir[1].x = conf->particle[i].chdir[1].y = conf->particle[i].chdir[1].z =0; DEBUG_INIT("Line: %s\nNumber of Fields: %d", line, fields); if (fields == 9){ conf->particle[i].switched = 0; fprintf(stdout, "WARNING: Particle %ld is assumed to be not switched!\n", i+1); fields++; } if (fields != 10) { fprintf (stderr, "ERROR: Could not read coordinates for particle %ld.\n \ Did you specify box size at the begining?\n\n", i+1); free(line); exit (1); } /* Scale position vector to the unit cube */ usepbc(&conf->particle[i].pos, conf->box ); conf->particle[i].pos.x /= conf->box.x; conf->particle[i].pos.y /= conf->box.y; conf->particle[i].pos.z /= conf->box.z; if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].dir, conf->particle[i].dir) < ZEROTOL )) { //DEBUG_INIT("Geotype = %d < %d", conf->particle[i].geotype,SP); fprintf (stderr, "ERROR: Null direction vector supplied for particle %ld.\n\n", i+1); 
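            /* For reference, the per-particle record parsed above is one line per
             * particle after the box-size line (example values are arbitrary):
             *   pos.x pos.y pos.z  dir.x dir.y dir.z  patch.x patch.y patch.z  switched
             *   e.g.: 1.0 2.0 3.0  0.0 0.0 1.0  1.0 0.0 0.0  0
             * A 9-field line is also accepted, with switched assumed to be 0. */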
free(line); exit (1); } else { normalise(&conf->particle[i].dir); } if ((topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0]<SP)&&( DOT(conf->particle[i].patchdir[0], conf->particle[i].patchdir[0]) < ZEROTOL )) { fprintf (stderr, "ERROR: Null patch vector supplied for particle %ld.\n\n", i+1); free(line); exit (1); } else { ortogonalise(&conf->particle[i].patchdir[0],conf->particle[i].dir); normalise(&conf->particle[i].patchdir[0]); } // Switch the type if(conf->particle[i].switched){ if(conf->particle[i].switchtype == 0){ fprintf(stderr, "ERROR: Particle %ld switched even though it has no switchtype", i); free(line); exit(1); } tmp_type = conf->particle[i].type; conf->particle[i].type = conf->particle[i].switchtype; conf->particle[i].switchtype = tmp_type; } DEBUG_INIT("%ld:\t%lf\t%lf\t%lf", i, conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z); } free(line); /*Make chains WHOLE*/ for (i=0;i<topo->chainnum;i++){ j=0; current = topo->chainlist[i][0]; first = current; chorig[0].pos = conf->particle[first].pos; while (current >=0 ) { /*shift the chain particle by first one*/ conf->particle[current].pos.x -= chorig[0].pos.x; conf->particle[current].pos.y -= chorig[0].pos.y; conf->particle[current].pos.z -= chorig[0].pos.z; /*put it in orig box*/ conf->particle[current].pos.x -= anint(conf->particle[current].pos.x); conf->particle[current].pos.y -= anint(conf->particle[current].pos.y); conf->particle[current].pos.z -= anint(conf->particle[current].pos.z); //printf("ant: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z); /*shot it back*/ conf->particle[current].pos.x += chorig[0].pos.x; conf->particle[current].pos.y += chorig[0].pos.y; conf->particle[current].pos.z += chorig[0].pos.z; //printf("posstart: %f %f %f\n",conf->particle[current].pos.x,conf->particle[current].pos.y,conf->particle[current].pos.z); j++; current = topo->chainlist[i][j]; } } err = 0; //for (i=0; i < topo->npart-1; i++) { // for (j=i+1; j < topo->npart; j++) { // if ( overlap(conf->particle[i], conf->particle[j], conf->box, topo->ia_params) ) { // fprintf (stderr, // "ERROR: Overlap in initial coniguration between particles %ld and %ld.\n", // i+1, j+1); // err = 1; // } // } //} if (err) { printf ("\n"); exit (1); } fclose (infile); fflush (stdout); } /*..............................................................................*/ /**************************************************************************** * TOPOLOGY INITIALIZATION *****************************************************************************/ /* Create lists for chain operations: Connectivity list where it is written for each sc with which sc it is connected. The order is important because spherocylinders have direction First is interacting tail then head. 
Chain list where particles are assigned to chains to which they belong */ void init_top(struct topo * topo, struct conf * conf, struct sim * sim,char filename[30]) { long i,j,k,mol,maxch,maxpart; FILE *infile; char *pline=NULL, *dummy=NULL, *sysnames[MAXN]; char line[STRLEN], keystr[STRLEN], molname[STRLEN]; unsigned size; long *sysmoln /*[MAXN]*/; BOOL exclusions[MAXT][MAXT]; char *fgets2(char *, int , FILE *); void strip_comment (char *); void trim(char *); int continuing(char *); void upstring (char *); int filltypes(char **, struct topo * topo); int fillexter(char **, struct topo * topo); int fillexclusions(char **, BOOL (*exclusions)[MAXT][MAXT]); void beforecommand(char *, char *, char); int fillmol(char *, char *, struct molecule * molecules, struct topo * topo); int fillsystem(char *, char *[MAXN], long **); void initparams(struct topo * topo); void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]); int topdealoc(char **, char *[MAXN], long **, struct molecule *); struct molecule molecules[MAXMT]; if ((infile = fopen(filename, "r")) == NULL) { fprintf (stderr, "\nTOPOLOGY ERROR: Could not open top.init file.\n\n"); exit (1); } fprintf (stdout, "Initialize chainlist...\n"); fflush(stdout); sysmoln = malloc( sizeof(long)*MAXN); if(sysmoln == NULL){ fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sysmoln"); exit(1); } struct particles tmp_particles[MAXN]; for (i=0;i<MAXN;i++) { if (i < MAXMT) { topo->chainparam[i].bond1eq = -1; topo->chainparam[i].bond1c = -1; topo->chainparam[i].bond2eq = -1; topo->chainparam[i].bond2c = -1; topo->chainparam[i].bonddc = -1; topo->chainparam[i].angle1eq = -1; topo->chainparam[i].angle1c = -1; topo->chainparam[i].angle2eq = -1; topo->chainparam[i].angle2c = -1; molecules[i].name = NULL; molecules[i].type = malloc(sizeof(long)*MAXN); molecules[i].switchtype = malloc(sizeof(long)*MAXN); molecules[i].delta_mu = malloc(sizeof(double)*MAXN); for (j=0;j<MAXN;j++) { molecules[i].type[j] = -1; } } for (j = 0; j < MAXCHL; j++){ topo->chainlist[i][j] = -1; } sysnames[i]=NULL; } for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { exclusions[i][j]=FALSE; } } topo->exter.exist = FALSE; topo->exter.thickness = 0.0; topo->exter.epsilon = 0.0; topo->exter.attraction = 0.0; topo->exter.sqmaxcut = 0.0; for(i = 0; i < MAXT; i++){ for(j = 0; j < MAXT; j++){ for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = 0; } } } fprintf (stdout, "Reading topology...\n"); fflush(stdout); molname[0] = ' '; initparams(topo); pline=malloc((size_t)STRLEN); while (fgets2(line,STRLEN-2,infile) != NULL) { strcpy(pline,line); if (!pline) fprintf (stderr, "\nTOPOLOGY ERROR: Empty line in topology.\n\n"); /* build one long line from several fragments */ while (continuing(line) && (fgets2(line,STRLEN-1,infile) != NULL)) { size=strlen(pline)+strlen(line)+1; free(pline); pline=malloc((size_t)size); strcat(pline,line); } /* skip trailing and leading spaces and comment text */ strip_comment (pline); trim (pline); /* if there is something left... 
*/ if ((int)strlen(pline) > 0) { // get the [COMMAND] key if (pline[0] == OPENKEY) { pline[0] = ' '; beforecommand(keystr,pline,CLOSEKEY); upstring (keystr); } else { //DEBUG fprintf (stdout, "Topology read type:%s, %s \n",keystr,pline); if (!strcmp(keystr,"TYPES")) { fflush(stdout); if (!filltypes(&pline, topo)) { DEBUG_INIT("Something went wrong with filltypes"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading types\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } DEBUG_INIT("back in init_top"); } else{ if (!strcmp(keystr,"MOLECULES")){ DEBUG_INIT("Let's go to the molecules"); if (molname[0] == ' ') { beforecommand(molname,pline,SEPARATOR); i=0; while (molecules[i].name != NULL) i++; DEBUG_INIT("in the middle of getting to fillmol"); molecules[i].name = malloc(strlen(molname)+1); strcpy(molecules[i].name, molname); fprintf (stdout, "Topology read for molecule: %s \n",molname); } if (!fillmol(molname, pline, molecules, topo)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading molecules\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } if ((dummy = strchr (pline,CLOSEMOL)) != NULL) molname[0] = ' '; } else { if (!strcmp(keystr,"SYSTEM")) { if (!fillsystem(pline,sysnames,&sysmoln)) { fprintf (stderr, "\nTOPOLOGY ERROR: in reading system\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXTER")) { fflush(stdout); if (!fillexter(&pline, topo)) { DEBUG_INIT("Something went wrong with external potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading external potential\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { if (!strcmp(keystr,"EXCLUDE")) { fflush(stdout); if (!fillexclusions(&pline,&exclusions)) { DEBUG_INIT("Something went wrong with exclusions potential"); fprintf (stderr, "\nTOPOLOGY ERROR: in reading exclusions\n\n"); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } else { fprintf (stderr, "\nTOPOLOGY ERROR: invalid keyword:%s.\n\n", keystr); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } } } } } } } } /*we have sucessfully read topology*/ if (pline !=NULL) free(pline); pline=NULL; fclose (infile); fflush (stdout); /*fill ia_params combinations*/ fprintf (stdout, "\nTopology succesfully read. 
Generating pair interactions...\n");
    genparampairs(topo,&exclusions);

    double maxlength = 0;
    for(i = 0; i < MAXT; i++){
        if(maxlength < topo->ia_params[i][i].len[0])
            maxlength = topo->ia_params[i][i].len[0];
    }
    topo->sqmaxcut += maxlength+2;
    topo->sqmaxcut *= 1.1;
    topo->maxcut = topo->sqmaxcut;
    topo->sqmaxcut = topo->sqmaxcut*topo->sqmaxcut;
    topo->exter.sqmaxcut += maxlength;
    topo->exter.sqmaxcut *= topo->exter.sqmaxcut*1.1;

    /*TODO fill chain list and maxch, park particle type*/
    fprintf (stdout, "Generating chainlist...\n");
    maxch=0;
    maxpart=0;
    i=0;
    while (sysnames[i]!=NULL) {
        mol=0;
        while (strcmp(molecules[mol].name,sysnames[i])) {
            mol++;
            if (molecules[mol].name == NULL) {
                fprintf (stderr, "TOPOLOGY ERROR: molecule %s is not defined.\n\n",sysnames[i]);
                topdealoc(&pline,sysnames,&sysmoln, molecules);
                exit(1);
            }
        }
        for (j=0;j<sysmoln[i];j++) {
            //DEBUG fprintf (stdout, "molnames %s sysname %s sysnum %ld \n",molnames[mol],sysnames[i],sysmoln[i]);
            k=0;
            while (molecules[mol].type[k] != -1) {
                tmp_particles[maxpart].type = molecules[mol].type[k];
                tmp_particles[maxpart].switchtype = molecules[mol].switchtype[k];
                tmp_particles[maxpart].delta_mu = molecules[mol].delta_mu[k];
                tmp_particles[maxpart].chaint = mol;
                tmp_particles[maxpart].chainn = maxch;
                if (k >= MAXCHL) { /* >= : valid chain indices are 0 .. MAXCHL-1 */
                    fprintf (stderr, "TOPOLOGY ERROR: more particles in chain (%ld) than allowed(%d).\n",k,MAXCHL);
                    fprintf (stderr, "Change MAXCHL in source and recompile the program. \n\n");
                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                    exit(1);
                }
                if (molecules[mol].type[1] != -1) {
                    topo->chainlist[maxch][k] = maxpart;
                }
                k++;
                maxpart++;
                if (maxpart >= MAXN) { /* >= : the next write would overflow tmp_particles[MAXN] */
                    fprintf (stderr, "TOPOLOGY ERROR: more particles(%ld) than allowed(%d).\n",maxpart,MAXN);
                    fprintf (stderr, "Change MAXN in source and recompile the program. \n\n");
                    topdealoc(&pline,sysnames,&sysmoln, molecules);
                    exit(1);
                }
            }
            if (molecules[mol].type[1] != -1) {
                maxch++;
            }
        }
        i++;
    }
    topo->npart = maxpart;

    /* write the particles from the temporary to the "permanent" conf */
    conf->particle = malloc(sizeof(struct particles) * topo->npart);
    if(conf->particle == NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for conf->particle");
        exit(1);
    }
    for(i = 0; i < topo->npart; i++){
        conf->particle[i].type = tmp_particles[i].type;
        conf->particle[i].switchtype = tmp_particles[i].switchtype;
        conf->particle[i].delta_mu = tmp_particles[i].delta_mu;
        conf->particle[i].chaint = tmp_particles[i].chaint;
        conf->particle[i].chainn = tmp_particles[i].chainn;
    }

    /* Initialize the clusterlist */
    sim->clusterlist = malloc(sizeof(long) * topo->npart);
    if(sim->clusterlist == NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clusterlist!");
        exit(1);
    }
    sim->clustersenergy = malloc(sizeof(double) * topo->npart);
    if(sim->clustersenergy== NULL){
        fprintf(stderr, "\nTOPOLOGY ERROR: Could not allocate memory for sim->clustersenergy!");
        exit(1);
    }
    sim->clusters = NULL;

    /* get all the particles with switch type */
    long switchlist[topo->npart];
    long n_switch_part = 0;
    for(i = 0; i < topo->npart; i++){
        if(conf->particle[i].type != conf->particle[i].switchtype){
            switchlist[n_switch_part] = i;
            n_switch_part++;
        }
    }
    topo->n_switch_part = n_switch_part;
    if (n_switch_part == 0 && sim->switchprob > 0){
        fprintf(stderr, "TOPOLOGY WARNING: No switchable particles found, but the probability for a switch is not zero!\n");
        sim->switchprob = 0;
        fprintf(stderr, "TOPOLOGY WARNING: We changed the switch probability to zero for this run!\n");
    }
    topo->switchlist=NULL;
    if (n_switch_part > 0){
        topo->switchlist =
malloc(sizeof(long) * n_switch_part); for(i = 0; i < n_switch_part; i++){ topo->switchlist[i] = switchlist[i]; //DEBUG //printf("%ld is in switchlist\n", switchlist[i]); } } j = 0; while (topo->chainlist[j][0] >= 0) { j++; } topo->chainnum = j; if (topo->chainnum != maxch) { fprintf (stderr, "TOPOLOGY ERROR: Maximum number of chains(%ld) does not agree with number of chains (%ld)\n\n",maxch,topo->chainnum); topdealoc(&pline,sysnames,&sysmoln, molecules); exit (1); } k=0; /*clear connectivity and then fill it from chain list*/ fprintf (stdout, "Generating connectivity...\n"); for (i=0; i<MAXN; i++) { topo->conlist[i][0] = -1; topo->conlist[i][1] = -1; topo->conlist[i][2] = -1; topo->conlist[i][3] = -1; } conf->sysvolume = 0; for (i=0; i<maxpart; i++) { for (j=0; j<MAXCHL; j++) { if (topo->chainlist[i][j] >= 0) { k = topo->chainlist[i][j]; if ((j+1 < MAXCHL)&&(topo->chainlist[i][j+1] >= 0)) topo->conlist[k][1] = topo->chainlist[i][j+1]; /*if there is a next particle fill it to head bond*/ if (j > 0) topo->conlist[k][0] = topo->chainlist[i][j-1]; /*if this is not first particle fill tail bond*/ if ((j+2 < MAXCHL)&& (topo->chainlist[i][j+2] >= 0)) topo->conlist[k][3] = topo->chainlist[i][j+2]; /*if there is a second next particle fill it second neighbour*/ if (j > 1) topo->conlist[k][2] = topo->chainlist[i][j-2]; /*if this is not second or first particle fill second tail bond*/ } } conf->sysvolume += topo->ia_params[conf->particle[i].type][conf->particle[i].type].volume; } /*DEBUG for (i=0; i<MAXN; i++) { for (j=0; j<MAXCHL; j++) { fprintf (stderr, " %d",chainlist[i][j]); } fprintf (stderr, " \n"); } for (i=0; i<MAXN; i++) { printf (" %ld %ld %ld %ld\n",conlist[i][0],conlist[i][1],conlist[i][2],conlist[i][3]); } */ // Mark particles as not switched for(i = 0; i < maxpart; i++){ conf->particle[i].switched = 0; } topdealoc(&pline,sysnames,&sysmoln, molecules); DEBUG_INIT("Finished with reading the topology"); /* Parallel tempering check */ #ifdef MPI // probability to switch replicas = exp ( -0.5 * dT*dT * N / (1 + dT) ) printf("Probability to switch replicas is roughly: %f\n",exp(-0.5 * maxpart * sim->dtemp * sim->dtemp / (1.0 + sim->dtemp)) ); #endif } /*..........................................................................*/ /*dealocting memory for init_top*/ int topdealoc(char **pline,char *sysnames[MAXN], long **sysmoln, struct molecule * molecules) { long i; if ((*pline) != NULL) free((*pline)); (*pline)=NULL; if ((*sysmoln) != NULL) free((*sysmoln)); (*sysmoln)=NULL; for (i=0;i<MAXN;i++) { if (i < MAXMT) { free(molecules[i].name); free(molecules[i].type); free(molecules[i].switchtype); free(molecules[i].delta_mu); } if ((sysnames[i]) != NULL) free(sysnames[i]); sysnames[i]=NULL; } return 0; } /* initiate vectors of a single particle*/ void int_partvec(long target, struct ia_param * ia_parami, struct conf * conf ) { struct quat quatrot; struct quat quat_create(struct vector, double, double); void vec_rotate(struct vector *, struct quat); void normalise(struct vector *); void ortogonalise(struct vector *,struct vector); if ( (ia_parami->geotype[0] == SCA) || (ia_parami->geotype[0] == SCN) ){ /*SCA and SCN are isotropic... 
nothing to initialize*/ return; } normalise (&conf->particle[target].dir); ortogonalise(&conf->particle[target].patchdir[0],conf->particle[target].dir); /*calculate patch sides*/ if ( (ia_parami->geotype[0] == PSC) || (ia_parami->geotype[0] == CPSC) || (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){ /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[0]),quatrot); /*second side*/ conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[1]),quatrot); } /*calculate second patchdir*/ if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){ conf->particle[target].patchdir[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].dir, ia_parami->csecpatchrot[0], ia_parami->ssecpatchrot[0]); vec_rotate(&(conf->particle[target].patchdir[1]),quatrot); ortogonalise(&conf->particle[target].patchdir[1],conf->particle[target].dir); } /*calculate second patch sides*/ if ( (ia_parami->geotype[0] == TPSC) || (ia_parami->geotype[0] == TCPSC) ){ /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[2]),quatrot); /*second side*/ conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].dir, ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[3]),quatrot); } /*calculate chdir vector*/ if ( (ia_parami->geotype[0] == CHPSC) || (ia_parami->geotype[0] == CHCPSC) || (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC)){ conf->particle[target].chdir[0] = conf->particle[target].dir; quatrot = quat_create(conf->particle[target].patchdir[0], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]); vec_rotate(&(conf->particle[target].chdir[0]), quatrot); /* rotate patch vector by half size of patch*/ conf->particle[target].patchsides[0] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[0]),quatrot); /*second side*/ conf->particle[target].patchsides[1] = conf->particle[target].patchdir[0]; quatrot=quat_create(conf->particle[target].chdir[0], ia_parami->pcoshalfi[0], -1.0*ia_parami->psinhalfi[0]); vec_rotate(&(conf->particle[target].patchsides[1]),quatrot); } /*calculate chdir vector for seond patch*/ if ( (ia_parami->geotype[0] == TCHPSC) || (ia_parami->geotype[0] == TCHCPSC) ){ conf->particle[target].chdir[1] = conf->particle[target].dir; quatrot = quat_create(conf->particle[target].patchdir[1], ia_parami->chiral_cos[0], ia_parami->chiral_sin[0]); vec_rotate(&(conf->particle[target].chdir[1]), quatrot); /* rotate patch vector by half size of patch to get sides*/ conf->particle[target].patchsides[2] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], 
ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[2]),quatrot); /*second side*/ conf->particle[target].patchsides[3] = conf->particle[target].patchdir[1]; quatrot=quat_create(conf->particle[target].chdir[1], ia_parami->pcoshalfi[2], -1.0*ia_parami->psinhalfi[2]); vec_rotate(&(conf->particle[target].patchsides[3]),quatrot); } } /* calculate vectors on particles for speedup*/ void partvecinit(struct topo * topo, struct sim * sim, struct conf * conf ) { long i; void int_partvec(long target, struct ia_param *, struct conf * conf ); for(i = 0; i < topo->npart; i++){ if ( topo->ia_params[conf->particle[i].type][conf->particle[i].type].geotype[0] < SP) int_partvec(i,&(topo->ia_params[conf->particle[i].type][conf->particle[i].type]),conf); } } /*generate interations pairs*/ void genparampairs(struct topo * topo, BOOL (*exclusions)[MAXT][MAXT]) { int i,j,k; int a[2]; int len; double length = 0; // The length of a PSC, currently only one is allow, ie implemented for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { if (i!=j) { if((topo->ia_params[j][j].geotype[0] != 0) && (topo->ia_params[i][i].geotype[0] != 0)){ a[0] = i; a[1] = j; for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = topo->ia_params[a[k]][a[k]].geotype[0]; topo->ia_params[i][j].len[k] = topo->ia_params[a[k]][a[k]].len[0]; if (topo->ia_params[a[k]][a[k]].len[0] > 0){ if (length == 0){ length = topo->ia_params[a[k]][a[k]].len[0]; } else if (length > 0){ if (length != topo->ia_params[a[k]][a[k]].len[0]){ fprintf(stderr, "Error: "); fprintf(stderr, "Different lengths for spherocylinders have not been implemented yet!\n"); fprintf(stderr, "\tCheck the length of type %d!\n", a[k]); exit(1); } } } topo->ia_params[i][j].half_len[k] = topo->ia_params[a[k]][a[k]].half_len[0]; /* Handle angles only, when geotype is a patchs sphero cylinder */ if(topo->ia_params[i][j].geotype[k] >= PSC && topo->ia_params[i][j].geotype[k] < SP){ topo->ia_params[i][j].pangl[k] = topo->ia_params[a[k]][a[k]].pangl[0]; topo->ia_params[i][j].panglsw[k] = topo->ia_params[a[k]][a[k]].panglsw[0]; topo->ia_params[i][j].pcangl[k] = cos(topo->ia_params[i][j].pangl[k]/2.0/180*PI); topo->ia_params[i][j].pcanglsw[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/180*PI); topo->ia_params[i][j].pcoshalfi[k] = cos((topo->ia_params[i][j].pangl[k]/2.0+topo->ia_params[i][j].panglsw[k])/2.0/180*PI); topo->ia_params[i][j].psinhalfi[k] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k] * topo->ia_params[i][j].pcoshalfi[k]); } /* Only when the PSC is chiral */ if( (topo->ia_params[i][j].geotype[k] == CHCPSC) || (topo->ia_params[i][j].geotype[k] == CHPSC) \ || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){ topo->ia_params[i][j].chiral_cos[k] = topo->ia_params[a[k]][a[k]].chiral_cos[0]; topo->ia_params[i][j].chiral_sin[k] = topo->ia_params[a[k]][a[k]].chiral_sin[0]; } /* Information of two patches */ if( (topo->ia_params[i][j].geotype[k] == TCPSC) || (topo->ia_params[i][j].geotype[k] == TPSC) \ || (topo->ia_params[i][j].geotype[k] == TCHCPSC) || (topo->ia_params[i][j].geotype[k] == TCHPSC) ){ topo->ia_params[i][j].csecpatchrot[k] = topo->ia_params[a[k]][a[k]].csecpatchrot[0]; topo->ia_params[i][j].ssecpatchrot[k] = topo->ia_params[a[k]][a[k]].ssecpatchrot[0]; topo->ia_params[i][j].pangl[k+2] = topo->ia_params[a[k]][a[k]].pangl[2]; topo->ia_params[i][j].panglsw[k+2] = topo->ia_params[a[k]][a[k]].panglsw[2]; topo->ia_params[i][j].pcangl[k+2] = cos(topo->ia_params[i][j].pangl[k+2]/2.0/180*PI); 
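                        /* Worked example of the precomputation around here: for a
                         * second patch with pangl = 90 deg and panglsw = 5 deg,
                         *   pcangl    = cos(45 deg) ~ 0.7071  (half patch angle)
                         *   pcanglsw  = cos(50 deg) ~ 0.6428  (half angle + switch)
                         *   pcoshalfi = cos(25 deg) ~ 0.9063  (half of that, for quaternions)
                         *   psinhalfi = sin(25 deg) ~ 0.4226  (via sqrt(1 - cos^2))
                         */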
topo->ia_params[i][j].pcanglsw[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/180*PI); topo->ia_params[i][j].pcoshalfi[k+2] = cos((topo->ia_params[i][j].pangl[k+2]/2.0+topo->ia_params[i][j].panglsw[k+2])/2.0/180*PI); topo->ia_params[i][j].psinhalfi[k+2] = sqrt(1.0 - topo->ia_params[i][j].pcoshalfi[k+2] * topo->ia_params[i][j].pcoshalfi[k+2]); } } len = strlen(topo->ia_params[i][i].name); strncpy(topo->ia_params[i][j].name, topo->ia_params[i][i].name, len + 1); len = strlen(topo->ia_params[i][i].other_name); strncpy(topo->ia_params[i][j].other_name, topo->ia_params[i][i].other_name, len + 1); topo->ia_params[i][j].sigma = AVER(topo->ia_params[i][i].sigma,topo->ia_params[j][j].sigma); topo->ia_params[i][j].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->ia_params[j][j].epsilon); topo->ia_params[i][j].pswitch = AVER(topo->ia_params[i][i].pswitch,topo->ia_params[j][j].pswitch); topo->ia_params[i][j].rcutwca = (topo->ia_params[i][j].sigma)*pow(2.0,1.0/6.0); // Averaging of the flat part of attraction topo->ia_params[i][j].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, \ topo->ia_params[j][j].pdis - topo->ia_params[j][j].rcutwca) + topo->ia_params[i][j].rcutwca; topo->ia_params[i][j].rcut = topo->ia_params[i][j].pswitch+topo->ia_params[i][j].pdis; // if not non-attractive == if attractive if (!((topo->ia_params[i][j].geotype[0] % 10 == 0) || (topo->ia_params[i][j].geotype[1] % 10 == 0))){ if (topo->ia_params[i][j].rcutwca > topo->ia_params[i][j].rcut){ fprintf(stderr, "Error: Repulsive cutoff is larger than the attractive cutoff!\n"); fprintf(stderr, " between %d and %d: %lf > %lf\n", i, j, topo->ia_params[i][j].rcutwca, topo->ia_params[i][j].rcut); } } if ( topo->ia_params[i][j].rcutwca > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[i][j].rcutwca; if ( topo->ia_params[i][j].rcut > topo->sqmaxcut ) topo->sqmaxcut = topo->ia_params[i][j].rcut; } } } /*filling interaction with external potential*/ if( (topo->exter.exist) && (topo->ia_params[i][i].geotype[0] != 0)){ /*use everything like for given particles except distance and attraction, which is generated as for other interactions*/ topo->exter.interactions[i] = topo->ia_params[i][i]; topo->exter.interactions[i].sigma = AVER(topo->ia_params[i][i].sigma, topo->exter.thickness); topo->exter.interactions[i].rcutwca = (topo->exter.interactions[i].sigma)*pow(2.0,1.0/6.0); topo->exter.interactions[i].epsilon = sqrt(topo->ia_params[i][i].epsilon * topo->exter.epsilon); topo->exter.interactions[i].pswitch = AVER(topo->ia_params[i][i].pswitch, topo->exter.attraction); topo->exter.interactions[i].pdis = AVER(topo->ia_params[i][i].pdis - topo->ia_params[i][i].rcutwca, 0.0) + topo->exter.interactions[i].rcutwca; topo->exter.interactions[i].rcut = topo->exter.interactions[i].pswitch + topo->exter.interactions[i].pdis; if (topo->exter.interactions[i].rcut > topo->exter.sqmaxcut ) topo->exter.sqmaxcut = topo->exter.interactions[i].rcut; } } for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { if ( (*exclusions)[i][j] ) topo->ia_params[i][j].epsilon = 0.0; } } } /*initialize parameters for interactions*/ void initparams(struct topo * topo) { int i,j,k; for (i=0;i<MAXT;i++) { for (j=0;j<MAXT;j++) { for(k = 0; k < 2; k++){ topo->ia_params[i][j].geotype[k] = 0; topo->ia_params[i][j].len[k] = 0.0; topo->ia_params[i][j].half_len[k] = 0.0; topo->ia_params[i][j].chiral_cos[k] = 0.0; topo->ia_params[i][j].chiral_sin[k] = 0.0; topo->ia_params[i][j].csecpatchrot[k] = 0.0; 
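                /* A zero geotype is the "type not defined" sentinel: the pair
                 * table in genparampairs above is only generated when both self
                 * entries have a nonzero geotype, so slots zeroed here stay inert. */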
                topo->ia_params[i][j].ssecpatchrot[k] = 0.0;
            }
            for(k = 2; k < 4; k++){
                topo->ia_params[i][j].pangl[k] = 0.0;
                topo->ia_params[i][j].panglsw[k] = 0.0;
                topo->ia_params[i][j].pcangl[k] = 0.0;
                topo->ia_params[i][j].pcanglsw[k] = 0.0;
                topo->ia_params[i][j].pcoshalfi[k] = 0.0;
                topo->ia_params[i][j].psinhalfi[k] = 0.0;
            }
            topo->ia_params[i][j].sigma = 0.0;
            topo->ia_params[i][j].epsilon = 0.0;
            topo->ia_params[i][j].rcutwca = 0.0;
            topo->ia_params[i][j].pdis = 0.0;
            topo->ia_params[i][j].pswitch = 0.0;
            topo->ia_params[i][j].rcut = 0.0;
            topo->ia_params[i][j].volume = 0.0;
            topo->ia_params[i][j].pvolscale = 0.0;
        }
    }
    topo->sqmaxcut = 0;
}

/*...........................................................................*/

/*filling the system parameters*/
int fillsystem(char *pline, char *sysnames[MAXN], long **sysmoln)
{
    int i,fields;
    char zz[STRLEN];
    void trim (char *);

    trim(pline);
    if (!pline) {
        fprintf (stderr, "TOPOLOGY ERROR: obtained empty line in fillsystem.\n\n");
        return 0;
    }
    i=0;
    while (sysnames[i]!=NULL) i++;

    fields = sscanf(pline, "%s %ld", zz, &(*sysmoln)[i]);
    sysnames[i]=malloc(strlen(zz)+1);
    strcpy(sysnames[i],zz);

    if (fields != 2) {
        fprintf (stderr, "TOPOLOGY ERROR: failed reading system from (%s).\n\n", pline);
        return 0;
    }
    if ((*sysmoln)[i] < 1) {
        fprintf (stderr, "TOPOLOGY ERROR: cannot have %ld molecules.\n\n", (*sysmoln)[i]);
        return 0;
    }
    fprintf (stdout, "system: %s %ld\n",sysnames[i],(*sysmoln)[i]);
    return 1;
}

/*filling the parameters for molecules*/
int fillmol(char *molname, char *pline, struct molecule * molecules, struct topo * topo)
{
    DEBUG_INIT("fillmol just has been called!");
    char str[STRLEN],str2[STRLEN],molcommand[STRLEN],molparams[STRLEN];
    int i,j,fields;
    double bondk,bonddist;
    void trim (char *);
    void upstring(char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    beforecommand(str2, pline, CLOSEMOL);
    aftercommand(str, str2, OPENMOL);
    trim(str);

    if (strlen(str) == 0) return 1;
    beforecommand(molcommand,str,SEPARATOR);
    aftercommand(molparams,str,SEPARATOR);
    trim(molcommand);
    trim(molparams);
    upstring (molcommand);
    DEBUG_INIT("molcommand: %s", molcommand);
    DEBUG_INIT("molparams: %s", molparams);
    i=0;
    while (strcmp(molecules[i].name, molname)) i++;
    j=0;
    while (molecules[i].type[j] != -1) j++;
    if (!strcmp(molcommand,"PARTICLES")) {
        fprintf (stdout, "particle %d: \t", j + 1);
        fields = sscanf(molparams,"%ld %ld %lf",molecules[i].type + j, molecules[i].switchtype + j, molecules[i].delta_mu + j);
        fprintf (stdout, "%ld ",molecules[i].type[j]);
        if (fields == 1){
            (molecules[i].switchtype[j]) = (molecules[i].type[j]);
            (molecules[i].delta_mu[j]) = 0;
            fields = 3;
        } else{
            fprintf(stdout, "(with switchtype: %ld and delta_mu: %lf)", molecules[i].switchtype[j], molecules[i].delta_mu[j]);
        }
        if (fields != 3) {
            fprintf (stderr, "TOPOLOGY ERROR: could not read a particle.\n\n");
            return 0;
        }
        fflush(stdout);
        if (molecules[i].type[j] < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: particles include a negative type.\n\n");
            return 0;
        }
        if (molecules[i].type[j] >= MAXT) { /* >= : valid types are 0 .. MAXT-1 */
            fprintf (stderr, "TOPOLOGY ERROR: particles include a type out of range 0-%ld.\n\n",(long)MAXT-1);
            return 0;
        }
        fprintf (stdout, "\n");
        return 1;
    }
    if (!strcmp(molcommand,"BOND1")) {
        fields = sscanf(molparams, "%le %le ", &bondk, &bonddist);
        if (fields < 2) {
            fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond1, should be 2.\n\n");
            return 0;
        }
        if (bonddist < 0) {
            fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist);
            return 0;
        }
        topo->chainparam[i].bond1c =
bondk; topo->chainparam[i].bond1eq = bonddist; fprintf (stdout, "bond1: %f %f \n",topo->chainparam[i].bond1c,topo->chainparam[i].bond1eq); return 1; } if (!strcmp(molcommand,"BOND2")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bond2, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].bond2c = bondk; topo->chainparam[i].bond2eq = bonddist; fprintf (stdout, "bond2: %f %f \n",topo->chainparam[i].bond2c,topo->chainparam[i].bond2eq); return 1; } if (!strcmp(molcommand,"BONDD")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for bondd, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: bonddist cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].bonddc = bondk; topo->chainparam[i].bonddeq = bonddist; fprintf (stdout, "bondd: %f %f \n",topo->chainparam[i].bonddc,topo->chainparam[i].bonddeq); return 1; } if (!strcmp(molcommand,"ANGLE1")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle1, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].angle1c = bondk; topo->chainparam[i].angle1eq = bonddist/180.0*PI; fprintf (stdout, "angle1: %f %f \n",topo->chainparam[i].angle1c,topo->chainparam[i].angle1eq); return 1; } if (!strcmp(molcommand,"ANGLE2")) { fields = sscanf(molparams, "%le %le ", &bondk, &bonddist); if (fields < 2) { fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for angle2, should be 2.\n\n"); return 0; } if (bonddist < 0) { fprintf (stderr, "TOPOLOGY ERROR: equilibrium angle cannot be negative: %f \n\n",bonddist); return 0; } topo->chainparam[i].angle2c = bondk; topo->chainparam[i].angle2eq = bonddist/180.0*PI; fprintf (stdout, "angle2: %f %f \n",topo->chainparam[i].angle2c,topo->chainparam[i].angle2eq); return 1; } fprintf (stderr, "TOPOLOGY ERROR: unknown parameter: %s.\n\n",molcommand); return 0; } /* Converts the geometrical type string into a number */ int convert_geotype(char * geotype){ if (strcmp(geotype, "CPSC") == 0) return CPSC; if (strcmp(geotype, "CHCPSC") == 0) return CHCPSC; if (strcmp(geotype, "SCA") == 0) return SCA; if (strcmp(geotype, "PSC") == 0) return PSC; if (strcmp(geotype, "CHPSC") == 0) return CHPSC; if (strcmp(geotype, "TCPSC") == 0) return TCPSC; if (strcmp(geotype, "TCHCPSC") == 0) return TCHCPSC; if (strcmp(geotype, "TPSC") == 0) return TPSC; if (strcmp(geotype, "TCHPSC") == 0) return TCHPSC; if (strcmp(geotype, "SPN") == 0) return SPN; if (strcmp(geotype, "SPA") == 0) return SPA; return 0; } /*filling the parameters of external potentail - wall. 
Returns 1 on success.*/
int fillexter(char **pline, struct topo * topo)
{
    int fields;
    double param[3];
    /* 0: thickness
     * 1: epsilon
     * 2: attraction
     */
    char typestr[STRLEN], paramstr[STRLEN];
    void trim (char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    beforecommand(typestr, *pline, SEPARATOR);
    aftercommand(paramstr, *pline, SEPARATOR);
    fields = sscanf(paramstr, "%le %le %le", &param[0], &param[1], &param[2]);
    if (fields >3) {
        fprintf (stderr, "TOPOLOGY ERROR: too many parameters for external potential. We have \
thickness, epsilon, and attraction distance so far.\n\n");
        return 0;
    }
    if (fields >0) {
        topo->exter.exist = TRUE;
        topo->exter.thickness = param[0];
        fprintf(stdout, "External potential with thickness: %le ",topo->exter.thickness);
        if (fields >1) {
            topo->exter.epsilon = param[1];
            fprintf(stdout, "epsilon: %le ",topo->exter.epsilon);
            if (fields >2) {
                topo->exter.attraction = param[2];
                fprintf(stdout, "and range of attraction: %le ",topo->exter.attraction);
            }
        }
    } else{
        topo->exter.exist = FALSE;
        fprintf(stdout, "No external potential ");
    }
    fprintf(stdout, " \n");
    DEBUG_INIT("Finished filling external potential");
    return 1;
}

/*filling pairs for which we exclude the attractive interaction. Returns 1 on success.*/
int fillexclusions(char **pline, BOOL (*exclusions)[MAXT][MAXT])
{
    long num1,num2;
    char *pline1, *pline2;
    void trim (char *);

    num1 = strtol(*pline, &pline2, 10);
    trim(pline2);
    if ((int)strlen(pline2) > 0) {
        num2 = strtol(pline2, &pline1, 10);
        trim(pline1);
        (*exclusions)[num1][num2]=TRUE;
        (*exclusions)[num2][num1]=TRUE;
        fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
    } else {
        fprintf(stderr, "Error reading topology exclusions; probably there is not an even number of types \n");
        return 0;
    }
    while ((int)strlen(pline1) > 0) {
        num1 = strtol(pline1, &pline2, 10);
        trim(pline2);
        if ((int)strlen(pline2) > 0) {
            num2 = strtol(pline2, &pline1, 10);
            trim(pline1);
            (*exclusions)[num1][num2]=TRUE;
            (*exclusions)[num2][num1]=TRUE;
            fprintf(stderr, "Exclusions %ld %ld \n", num1, num2);
        } else {
            fprintf(stderr, "Error reading topology exclusions; probably there is not an even number of types \n");
            return 0;
        }
    }
    return 1;
}

/*filling the parameters for types from given strings. Returns 1 on success.*/
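/* For reference, a [TYPES] line consumed by filltypes below has the form
 *   name  number  GEOTYPE  epsilon  sigma  [ attraction-dist  attraction-switch
 *   [ patch-angle  patch-switch  length  [ second-patch / chirality params ] ] ]
 * e.g. a hypothetical patchy spherocylinder entry (arbitrary example values):
 *   PROT 1 PSC 1.0 1.2 1.5 0.3 80.0 5.0 3.0
 */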
int filltypes(char **pline, struct topo * topo)
{
    int type;
    int geotype_i;
    int fields;
    char name[SMSTR];
    char geotype[SMSTR];
    double param[11];
    /* 0: epsilon
     * 1: sigma
     * 2: attraction dist
     * 3: attraction switch
     * 4: patch angle
     * 5: patch switch
     * 6: length
     * 7(optional): second patch rotation
     * 8(optional): second patch angle
     * 9(optional): second patch angle switch
     * +1: chirality
     */
    char typestr[STRLEN], paramstr[STRLEN];
    void trim (char *);
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    beforecommand(typestr, *pline, SEPARATOR);
    aftercommand(paramstr, *pline, SEPARATOR);
    fields = sscanf(paramstr, "%s %d %s %le %le %le %le %le %le %le %le %le %le %le",
            name, &type, geotype, &param[0], &param[1], &param[2], &param[3], &param[4],
            &param[5], &param[6], &param[7], &param[8], &param[9], &param[10]);
    fields -= 5; // number of parameter fields => I am too lazy to adjust everywhere below the numbers
    //DEBUG fprintf (stdout, "Topology read geotype: %ld with parameters fields %d, str:%s and %s in pline %s\n",geotype,fields,geotypestr,paramstr,pline);

    geotype_i = convert_geotype(geotype);
    if(!geotype_i){
        fprintf(stderr, "TOPOLOGY ERROR: Unknown GEOTYPE: %s!", geotype);
        return 0;
    }
    DEBUG_INIT("geotype_i: %d; fields = %d", geotype_i, fields);
    if (( (geotype_i == SCN) || (geotype_i == SPN) ) && (fields != 0)) {
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 0.\n\n", geotype);
        return 0;
    }
    if (( (geotype_i == SCA) || (geotype_i == SPA)) && (fields != 2)) {
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 2.\n\n", geotype);
        return 0;
    }
    if (( (geotype_i == PSC) || (geotype_i == CPSC) ) && (fields != 5)) {
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 5.\n\n", geotype);
        return 0;
    }
    if (( (geotype_i == CHPSC) || (geotype_i == CHCPSC) )&& ( fields != 6)) { /* was CHCPSC twice */
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 6.\n\n", geotype);
        return 0;
    }
    if (( (geotype_i == TPSC) || (geotype_i == TCPSC) ) && (fields != 8)) {
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 8.\n\n", geotype);
        return 0;
    }
    if (( (geotype_i == TCHPSC) || (geotype_i == TCHCPSC) )&& ( fields != 9)) { /* was TCHCPSC twice */
        fprintf (stderr, "TOPOLOGY ERROR: wrong number of parameters for %s geotype, should be 9.\n\n", geotype);
        return 0;
    }
    if ((geotype_i < 0) || (geotype_i > (MAXT + 10))) {
        fprintf (stderr, "TOPOLOGY ERROR: geotype (%s) is out of range: 0 - %d.\n\n", geotype, MAXT + 10);
        return 0;
    }

    strcpy(topo->ia_params[type][type].name, name);
    strcpy(topo->ia_params[type][type].other_name, name);
    topo->ia_params[type][type].geotype[0] = geotype_i;
    topo->ia_params[type][type].geotype[1] = geotype_i;
    topo->ia_params[type][type].epsilon = param[0];
    topo->ia_params[type][type].sigma = param[1];
    topo->ia_params[type][type].rcutwca = (topo->ia_params[type][type].sigma)*pow(2.0,1.0/6.0);

    fprintf(stdout, "Topology read of %d: %s (geotype: %s, %d) with parameters %lf %lf", type, name, geotype, geotype_i,
            topo->ia_params[type][type].epsilon, topo->ia_params[type][type].sigma);

    if (fields > 0) {
        topo->ia_params[type][type].pdis = param[2];
        topo->ia_params[type][type].pswitch = param[3];
        topo->ia_params[type][type].rcut = topo->ia_params[type][type].pswitch+topo->ia_params[type][type].pdis;
        fprintf(stdout, " %f %f",topo->ia_params[type][type].pdis,topo->ia_params[type][type].pswitch);
    }
    if (fields > 2) {
        int i;
        for(i = 0; i <
        for (i = 0; i < 2; i++) {
            topo->ia_params[type][type].len[i] = param[6];
            topo->ia_params[type][type].half_len[i] = param[6] / 2;
            topo->ia_params[type][type].pangl[i] = param[4];
            topo->ia_params[type][type].panglsw[i] = param[5];
            topo->ia_params[type][type].pcangl[i] = cos(param[4]/2.0/180*PI);                // C1
            topo->ia_params[type][type].pcanglsw[i] = cos((param[4]/2.0+param[5])/180*PI);   // C2
            topo->ia_params[type][type].pcoshalfi[i] = cos((param[4]/2.0+param[5])/2.0/180*PI);
            topo->ia_params[type][type].psinhalfi[i] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i] * topo->ia_params[type][type].pcoshalfi[i]);
        }
        fprintf(stdout, " %f %f", topo->ia_params[type][type].pangl[0], topo->ia_params[type][type].panglsw[0]);
    }
    if (fields == 6) {
        int i;
        for (i = 0; i < 2; i++) {
            topo->ia_params[type][type].chiral_cos[i] = cos(param[7] / 360 * PI);
            topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
            fprintf(stdout, " %f ", param[7]);
        }
    }
    if ((fields == 8) || (fields == 9)) {
        int i;
        for (i = 0; i < 2; i++) {
            topo->ia_params[type][type].csecpatchrot[i] = cos(param[7] / 360 * PI);
            topo->ia_params[type][type].ssecpatchrot[i] = sqrt(1 - topo->ia_params[type][type].csecpatchrot[i] * topo->ia_params[type][type].csecpatchrot[i]);
            //fprintf(stdout, " %f %f", topo->ia_params[type][type].csecpatchrot[0], topo->ia_params[type][type].ssecpatchrot[0]);
            topo->ia_params[type][type].pangl[i+2] = param[8];
            topo->ia_params[type][type].panglsw[i+2] = param[9];
            topo->ia_params[type][type].pcangl[i+2] = cos(param[8]/2.0/180*PI);               // C1
            topo->ia_params[type][type].pcanglsw[i+2] = cos((param[8]/2.0+param[9])/180*PI);  // C2
            topo->ia_params[type][type].pcoshalfi[i+2] = cos((param[8]/2.0+param[9])/2.0/180*PI);
            topo->ia_params[type][type].psinhalfi[i+2] = sqrt(1.0 - topo->ia_params[type][type].pcoshalfi[i+2] * topo->ia_params[type][type].pcoshalfi[i+2]);
        }
        fprintf(stdout, " %f %f %f", param[7], topo->ia_params[type][type].pangl[2], topo->ia_params[type][type].panglsw[2]);
    }
    if (fields == 9) {
        int i;
        for (i = 0; i < 2; i++) {
            topo->ia_params[type][type].chiral_cos[i] = cos(param[10] / 360 * PI);
            topo->ia_params[type][type].chiral_sin[i] = sqrt(1 - topo->ia_params[type][type].chiral_cos[i] * topo->ia_params[type][type].chiral_cos[i]);
            fprintf(stdout, " %f ", param[10]); /* the original printed param[9], but the chirality read just above is param[10] */
        }
    }

    // Volume
    if (geotype_i < SP)
        topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0) + PI/2.0*topo->ia_params[type][type].len[0]*pow((topo->ia_params[type][type].sigma)/2.0,2.0);
    else
        topo->ia_params[type][type].volume = 4.0/3.0*PI*pow((topo->ia_params[type][type].sigma)/2.0,3.0);
    if ( topo->ia_params[type][type].rcutwca > topo->sqmaxcut )
        topo->sqmaxcut = topo->ia_params[type][type].rcutwca;
    if ( topo->ia_params[type][type].rcut > topo->sqmaxcut )
        topo->sqmaxcut = topo->ia_params[type][type].rcut;
    fprintf(stdout, " \n");
    DEBUG_INIT("Finished filltypes");
    return 1;
}

/************************************************
 * String manipulation stuff for parsing files
 ************************************************/

/* Return the string that goes before the command character. */
void beforecommand(char *str, char *pline, char commandc) {
    char *dummy;
    void trim(char *);

    strcpy(str, pline);
    if ((dummy = strchr(str, commandc)) != NULL) (*dummy) = 0;
    trim(str);
}

/* Return the string that goes after the command character. */
void aftercommand(char *str, char *pline, char commandc) {
    char *dummy;
    int i;
    void trim(char *);

    strcpy(str, pline);
    if ((dummy = strchr(str, commandc)) != NULL) {
        i = 0;
        /* blank out everything up to and including the command character */
        while ((*dummy) != str[i]) {
            str[i] = ' ';
            i++;
        }
        str[i] = ' ';
    }
    trim(str);
}

/* Read a string of at most n characters from the stream, stripping the trailing newline. */
char *fgets2(char *line, int n, FILE *stream) {
    char *c;
    if (fgets(line, n, stream) == NULL) {
        return NULL;
    }
    if ((c = strchr(line, '\n')) != NULL)
        *c = 0;
    return line;
}

/* Remove comments. */
void strip_comment(char *line) {
    char *c;
    if (!line) return;
    /* search for a comment mark and replace it by a zero */
    if ((c = strchr(line, COMMENTSIGN)) != NULL)
        (*c) = 0;
}

/* Test whether the line continues (i.e. ends with the continuation character). */
int continuing(char *s) {
    int sl;
    void rtrim(char *str);

    rtrim(s);
    sl = strlen(s);
    if ((sl > 0) && (s[sl-1] == CONTINUE)) {
        s[sl-1] = 0;
        return 1; /*true*/
    } else
        return 0; /*false*/
}

/* Make a string uppercase. */
void upstring(char *str) {
    int i;
    for (i = 0; (i < (int)strlen(str)); i++)
        str[i] = toupper(str[i]);
}

/* Trim a string from the left. */
void ltrim(char *str) {
    char *tr;
    int c;
    if (!str) return;
    tr = strdup(str);
    c = 0;
    while ((tr[c] == ' ') || (tr[c] == '\n') || (tr[c] == '\t')) c++;
    strcpy(str, tr+c);
    free(tr);
}

/* Trim a string from the right. */
void rtrim(char *str) {
    int nul;
    if (!str) return;
    nul = strlen(str) - 1;
    while ((nul > 0) && ((str[nul] == ' ') || (str[nul] == '\t') || (str[nul] == '\n'))) {
        str[nul] = '\0';
        nul--;
    }
}

/* Trim a string from the left and right. */
void trim(char *str) {
    void ltrim(char *str);
    void rtrim(char *str);
    ltrim(str);
    rtrim(str);
}
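/* Illustrative sketch (added; not in the original code): how the helpers
   above split a topology line on the SEPARATOR character (':' is assumed
   here purely for the example values). The function name is hypothetical
   and the block is guarded so it does not enter the normal build. */
#ifdef PARSER_EXAMPLE
static void example_split(void) {
    char keyword[STRLEN], params[STRLEN];
    char line[STRLEN] = "exter: 5.0 1.0 3.0";
    void beforecommand(char *, char *, char);
    void aftercommand(char *, char *, char);

    beforecommand(keyword, line, SEPARATOR); /* keyword -> "exter" */
    aftercommand(params, line, SEPARATOR);   /* params  -> "5.0 1.0 3.0" */
    printf("key='%s' params='%s'\n", keyword, params);
}
#endif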
/**
 * Dumps a configuration to the supplied file handle.
 */
void draw(FILE *outfile, /*struct vector box, long npart, struct particles *particle,*/ struct conf * conf, struct topo * topo) {
    long i;
    double anint(double);

    //fprintf (outfile, "%15.8le %15.8le %15.8le\n", box.x, box.y, box.z);
    for (i = 0; i < topo->npart; i++) {
        fprintf(outfile, "%15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %15.8le %d\n",
                conf->box.x * ((conf->particle[i].pos.x) - anint(conf->particle[i].pos.x)),
                conf->box.y * ((conf->particle[i].pos.y) - anint(conf->particle[i].pos.y)),
                conf->box.z * ((conf->particle[i].pos.z) - anint(conf->particle[i].pos.z)),
                conf->particle[i].dir.x, conf->particle[i].dir.y, conf->particle[i].dir.z,
                conf->particle[i].patchdir[0].x, conf->particle[i].patchdir[0].y, conf->particle[i].patchdir[0].z,
                conf->particle[i].switched);
    }
}
/*............................................................................*/

/****************************************************************************/
/* Pairlist stuff                                                           */
/****************************************************************************/

/**
 * Initializes the pairlist and allocates memory
 */
void init_pairlist(struct topo * topo, struct sim * sim) {
    printf("\nAllocating memory for pairlist...\n");
    sim->pairlist = xmalloc(sizeof(struct pairs) * topo->npart);
    // Highest guess: every particle interacts with all the others
    // TODO: Make it more sophisticated
    long i;
    for (i = 0; i < topo->npart; i++) {
        sim->pairlist[i].pairs = malloc(sizeof(long) * topo->npart);
        sim->pairlist[i].num_pairs = 0;
    }
}
/*............................................................................*/

/**
 * Cleans up: deallocates the memory for the pairlist
 */
int dealloc_pairlist(struct topo * topo, struct sim * sim) {
    long i;
    if (sim->pairlist != NULL) {
        for (i = 0; i < topo->npart; i++) {
            if (sim->pairlist[i].pairs != NULL) {
                free(sim->pairlist[i].pairs);
            }
        }
        free(sim->pairlist);
    }
    return 0;
}
/*............................................................................*/

/**
 * Generates a pairlist with a very basic algorithm
 */
void gen_simple_pairlist(struct topo * topo, struct sim * sim, struct conf * conf) {
    struct vector r_cm;
    double r_cm2;
    double max_dist;
    // Set the pairlist to zero
    //DEBUG_INIT("Gen Pairlist")
    long i, j;
    for (i = 0; i < topo->npart; i++) {
        //DEBUG_INIT("%ld", i);
        sim->pairlist[i].num_pairs = 0;
    }
    long nj = topo->npart;
    long ni = nj - 1;
    for (i = 0; i < ni; i++) {
        for (j = i + 1; j < nj; j++) {
            r_cm.x = conf->particle[i].pos.x - conf->particle[j].pos.x;
            r_cm.y = conf->particle[i].pos.y - conf->particle[j].pos.y;
            r_cm.z = conf->particle[i].pos.z - conf->particle[j].pos.z;
            if ( r_cm.x < 0 )
                r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x - 0.5) ));
            else
                r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x + 0.5) ));
            if ( r_cm.y < 0 )
                r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y - 0.5) ));
            else
                r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y + 0.5) ));
            if ( r_cm.z < 0 )
                r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z - 0.5) ));
            else
                r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z + 0.5) ));
            r_cm2 = DOT(r_cm, r_cm);
            max_dist = AVER(sim->trans[conf->particle[i].type].mx, sim->trans[conf->particle[j].type].mx);
            max_dist *= (1 + sim->pairlist_update) * 2;
            max_dist += topo->maxcut;
            max_dist *= max_dist; /* squared */
            if (r_cm2 <= max_dist) {
                sim->pairlist[i].pairs[sim->pairlist[i].num_pairs++] = j;
                sim->pairlist[j].pairs[sim->pairlist[j].num_pairs++] = i;
            }
        }
    }
    ////Check for too many pairs
    //for(i = 0; i < topo->npart; i++){
    //    if (sim->pairlist[i].num_pairs >= topo->npart){
    //        fprintf(stderr, "ERROR: Too many pairs for particle %ld!!!\n", i);
    //        exit(1);
    //    }
    //}
}
/*.............................................................................*/

/**
 * Interface for the generation of the pairlist. Define other pairlist
 * algorithms above.
 */
void gen_pairlist(struct topo * topo, struct sim * sim, struct conf * conf) {
    gen_simple_pairlist(topo, sim, conf);
}
/*.............................................................................*/

/**
 * Print out the pairlist
 */
void print_pairlist(FILE * stream, struct sim * sim, struct topo * topo) {
    long i, j;
    for (i = 0; i < topo->npart; i++) {
        fprintf(stream, "%ld (%ld):", i, sim->pairlist[i].num_pairs);
        for (j = 0; j < sim->pairlist[i].num_pairs; j++) {
            fprintf(stream, " %ld", sim->pairlist[i].pairs[j]);
        }
        fprintf(stream, "\n");
    }
}
/*..........................................................................*/

/****************************************************************************/
/* Cluster statistics stuff                                                 */
/****************************************************************************/

/**
 * Determines whether two particles are in the same cluster
 */
int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *)) {
    /* if two particles are bonded they belong to the same cluster */
    if ( ((topo->chainparam[conf->particle[fst].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[fst].chaint]).bonddc >= 0) ) {
        if ( (snd == topo->conlist[fst][1]) || (snd == topo->conlist[fst][0]) ) {
            return TRUE;
        }
    }
    if ( ((topo->chainparam[conf->particle[snd].chaint]).bond1c >= 0) ||
         ((topo->chainparam[conf->particle[snd].chaint]).bonddc >= 0) ) {
        if ( (fst == topo->conlist[snd][1]) || (fst == topo->conlist[snd][0]) ) {
            return TRUE;
        }
    }
    /* Alternatively, a cluster could be made of particles closer than some distance:
    struct vector image(struct vector r1, struct vector r2, struct vector box);
    struct vector r_cm = image(conf->particle[fst].pos, conf->particle[snd].pos, conf->box);
    double dist2 = DOT(r_cm, r_cm);
    TODO: Make it much more efficient => define cluster_dist!!!
    if(dist2 > topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma * topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].sigma*4.0){
        return FALSE;
    } else {
        return TRUE;
    }*/
    /* here, a cluster is made of attractively interacting particles */
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);
    if (paire(fst, snd, intfce, topo, conf) > -0.10) {
        return FALSE;
    } else {
        return TRUE;
    }
}
/*............................................................................*/
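/* Illustrative sketch (added; not in the original code): how a caller can
   walk the pairlist produced above. Note that every pair (i, j) is stored
   twice, once in each particle's list. The function name is hypothetical
   and the block is guarded so it does not enter the normal build. */
#ifdef PAIRLIST_EXAMPLE
static void example_iterate_pairs(struct topo *topo, struct sim *sim) {
    long i, j, partner;
    for (i = 0; i < topo->npart; i++) {
        for (j = 0; j < sim->pairlist[i].num_pairs; j++) {
            partner = sim->pairlist[i].pairs[j];
            /* ... evaluate the pair (i, partner) here ... */
            (void)partner; /* silence unused-variable warnings in this stub */
        }
    }
}
#endif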
/**
 * Generate the clusterlist
 */
int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) {
    int change = TRUE; /* does it still change? */
    //long neighbour;
    long i, j, fst, snd, tmp, minnumber, maxnumber;
    int same_cluster(struct topo * topo, struct conf * conf, long fst, long snd, double (* intfce[MAXT][MAXT])(struct interacts *));

    // Set the cluster index of each particle to its own index
    for (i = 0; i < topo->npart; i++) {
        sim->clusterlist[i] = i;
    }
    // Start determining the clusters
    while (change) {
        change = FALSE;
        for (i = 0; i < topo->npart; i++) {
            /* if there is no pairlist, go over all pairs */
            maxnumber = topo->npart;
            minnumber = i;
            if (sim->pairlist_update) {
                maxnumber = sim->pairlist[i].num_pairs;
                minnumber = 0;
            }
            /* Go over the pairs to see if they are in the same cluster */
            for (j = minnumber; j < maxnumber; j++) {
                fst = i;
                snd = j;
                if (sim->pairlist_update) {
                    snd = sim->pairlist[i].pairs[j];
                }
                /* do the cluster analysis only for spherocylinders */
                if ( (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[0] < SP) &&
                     (topo->ia_params[conf->particle[fst].type][conf->particle[snd].type].geotype[1] < SP) ) {
                    /* if they are close to each other */
                    if (same_cluster(topo, conf, fst, snd, intfce)) {
                        if (fst > snd) {
                            tmp = snd;
                            snd = fst;
                            fst = tmp;
                        }
                        if (sim->clusterlist[fst] < sim->clusterlist[snd]) {
                            sim->clusterlist[snd] = sim->clusterlist[fst];
                            change = TRUE;
                            break; /* => will eventually restart the i loop */
                        }
                        if (sim->clusterlist[snd] < sim->clusterlist[fst]) {
                            sim->clusterlist[fst] = sim->clusterlist[snd];
                            change = TRUE;
                            break; /* => will eventually restart the i loop */
                        }
                    }
                }
            }
            if (change) {
                break;
            }
        }
    }
    return 0;
}
/*............................................................................*/

/**
 * Sort the clusterlist
 */
int sort_clusterlist(struct topo * topo, struct sim * sim) {
    long cluster_indices[topo->npart]; /* holds the different cluster indices (currently more memory than needed) */
    long num_cluster = 0;              /* number of clusters, needed temporarily */
    long i, j;

    /* how many clusters are there? */
    long max_index = -1;
    for (i = 0; i < topo->npart; i++) {
        if (max_index < sim->clusterlist[i]) {
            max_index = sim->clusterlist[i];
            cluster_indices[num_cluster++] = max_index;
        }
    }
    /* free the memory from the old clusters */
    if (sim->clusters) {
        for (i = 0; i < sim->num_cluster; i++) {
            if (sim->clusters[i].particles) {
                free(sim->clusters[i].particles);
            }
        }
        free(sim->clusters);
    }
    /* Allocate memory for the clusters */
    sim->clusters = xmalloc(sizeof(struct cluster) * num_cluster);
    for (i = 0; i < num_cluster; i++) {
        /* allocate maximal space for all the clusters */
        sim->clusters[i].particles = xmalloc(sizeof(long) * topo->npart);
        sim->clusters[i].npart = 0;
    }
    /* fill in the particles belonging to each cluster */
    for (i = 0; i < num_cluster; i++) {
        for (j = 0; j < topo->npart; j++) {
            if (sim->clusterlist[j] == cluster_indices[i]) {
                sim->clusters[i].particles[sim->clusters[i].npart++] = j;
            }
        }
    }
    sim->num_cluster = num_cluster;
    /* Find the biggest size */
    sim->max_clust = 0;
    for (i = 0; i < num_cluster; i++) {
        if (sim->clusters[i].npart > sim->max_clust) {
            sim->max_clust = sim->clusters[i].npart;
        }
    }
    /* Set the statistics to zero */
    /* added guard against leaking the previous allocation; assumes clusterstat starts out NULL, like sim->clusters */
    if (sim->clusterstat)
        free(sim->clusterstat);
    sim->clusterstat = xmalloc(sizeof(long) * sim->max_clust);
    for (i = 0; i < sim->max_clust; i++) {
        sim->clusterstat[i] = 0;
    }
    /* Do the statistics */
    for (i = 0; i < num_cluster; i++) {
        sim->clusterstat[sim->clusters[i].npart - 1]++;
    }
    return 0;
}
/*............................................................................*/

/**
 * Calculate the energies of the clusters
 */
int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) {
    long i, j, k;
    double paire(long, long, double (* intfce[MAXT][MAXT])(struct interacts *), struct topo * topo, struct conf * conf);

    for (i = 0; i < sim->num_cluster; i++) {
        sim->clustersenergy[i] = 0.0;
        for (j = 0; j < sim->clusters[i].npart; j++) {
            for (k = j+1; k < sim->clusters[i].npart; k++) {
                sim->clustersenergy[i] += paire(sim->clusters[i].particles[j], sim->clusters[i].particles[k], intfce, topo, conf);
            }
        }
    }
    return 0;
}
/*............................................................................*/

/**
 * Print the clusterlist
 */
int print_clusterlist(FILE * stream, BOOL decor, struct topo * topo, struct sim * sim, struct conf * conf) {
    long i;
    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                "   The Cluster List\n"
                "   (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for (i = 0; i < topo->npart; i++) {
        fprintf(stream, "%3ld %3ld %8.4lf %8.4lf %8.4lf", i + 1, sim->clusterlist[i] + 1,
                conf->particle[i].pos.x, conf->particle[i].pos.y, conf->particle[i].pos.z);
        fprintf(stream, "\n");
    }
    if (decor) {
        fprintf(stream, "-----------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/

/**
 * Print the clusters
 */
int print_clusters(FILE * stream, BOOL decor, struct sim * sim) {
    long i, j;
    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                "   The Clusters\n"
                "   (Index starts with 1)\n"
                "-----------------------------------------------------\n");
    }
    for (i = 0; i < sim->num_cluster; i++) {
        fprintf(stream, "%3ld(%f):", i + 1, sim->clustersenergy[i]);
        for (j = 0; j < sim->clusters[i].npart; j++) {
            fprintf(stream, "%5ld", sim->clusters[i].particles[j] + 1);
        }
        fprintf(stream, "\n");
    }
    if (decor) {
        fprintf(stream, "---------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/

/**
 * Print a statistics for the clusters
 */
int print_clusterstat(FILE * stream, BOOL decor, struct sim * sim) {
    long i;
    if (decor) {
        fprintf(stream, "\n"
                "-----------------------------------------------------\n"
                "   Cluster Distribution\n"
                "-----------------------------------------------------\n");
    }
    for (i = 0; i < sim->max_clust; i++) {
        fprintf(stream, "%5ld\t%5ld\n", i + 1, sim->clusterstat[i]);
    }
    if (decor) {
        fprintf(stream, "--------------------------------------------------\n");
    }
    fflush(stream);
    return 0;
}
/*............................................................................*/

/**
 * Alternative way of printing the cluster statistics: everything is on
 * one line. First monomers, then dimers etc.
 */
int print_clstat_oneline(FILE * stream, long sweep, struct sim * sim) {
    long i;
    fprintf(stream, "%ld: ", sweep);
    for (i = 0; i < sim->max_clust; i++) {
        fprintf(stream, "%5ld\t", sim->clusterstat[i]);
    }
    fprintf(stream, "\n");
    fflush(stream);
    return 0;
}

/**
 * Write out all the cluster statistics to files, if file handles are given
 */
int write_cluster(FILE * cl_stat, FILE * cl, FILE * cl_list, BOOL decor, long sweep, struct sim * sim, struct topo * topo, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *)) {
    int gen_clusterlist(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));
    int sort_clusterlist(struct topo * topo, struct sim * sim);
    int print_clusters(FILE * stream, BOOL decor, struct sim * sim);
    int calc_clusterenergies(struct topo * topo, struct sim * sim, struct conf * conf, double (* intfce[MAXT][MAXT])(struct interacts *));

    gen_clusterlist(topo, sim, conf, intfce);
    sort_clusterlist(topo, sim);
    calc_clusterenergies(topo, sim, conf, intfce);
    if (cl_stat) {
        if (decor == FALSE) {
            // without decoration this usually goes to a file, so print info
            // about the number of lines per frame
            fprintf(cl_stat, "Sweep: %ld | Maximal size: %ld\n", sweep, sim->max_clust);
        }
        print_clusterstat(cl_stat, decor, sim);
        /* print_clstat_oneline(cl_stat, sweep, sim); */
    }
    if (cl) {
        if (decor == FALSE) {
            fprintf(cl, "Sweep: %ld | Number of clusters: %ld\n", sweep, sim->num_cluster);
        }
        print_clusters(cl, decor, sim);
    }
    if (cl_list) {
        if (decor == FALSE) {
            fprintf(cl_list, "Sweep: %ld | Number of particles: %ld\n", sweep, topo->npart);
        }
        print_clusterlist(cl_list, decor, topo, sim, conf); /* the original passed cl here, which would misdirect the list; cl_list is clearly intended */
    }
    return 0;
}
/*............................................................................*/

/****************************************************************************/
/* Wang-Landau stuff                                                        */
/****************************************************************************/

/* Initiate a Wang-Landau calculation. */
int wlinit(struct wls *wl, char filename[30]) {
    long i, length, fields = 0;
    double field[5];
    FILE *infile;
    char line[STRLEN];
    int wlend(struct wls *);
    void trim(char *);
    void strip_comment(char *);

    infile = fopen(filename, "r");
    if (infile == NULL) {
        fprintf(stderr, "\nERROR: Could not open %s file.\n\n", filename);
        return 1;
    }
    length = 0;
    while (fgets2(line, STRLEN-2, infile) != NULL) {
        strip_comment(line);
        trim(line);
        /* if there is something left... */
        if ((int)strlen(line) > 0) {
            length++;
        }
    }
    length--; /* the first line holds alpha */

    (*wl).weights = malloc(sizeof(double) * length);
    (*wl).hist = malloc(sizeof(long) * length);
    (*wl).length[1] = 0;
    (*wl).dorder[1] = 0;
    fseek(infile, 0, SEEK_SET);
    i = 0;
    while (fgets2(line, STRLEN-2, infile) != NULL) {
        strip_comment(line);
        trim(line);
        /* if there is something left... */
        if ((int)strlen(line) > 0) {
            if (i == 0) {
                if (sscanf(line, "%le", &(*wl).alpha) != 1) {
                    fprintf(stderr, "ERROR: Could not read alpha at the beginning.\n\n");
                    wlend(wl);
                    return 1;
                } else i++;
            } else {
                fields = sscanf(line, "%le %le %le %le", &field[0], &field[1], &field[2], &field[3]);
                if (fields == 3) {
                    if (i == 1)
                        (*wl).minorder[0] = field[0];
                    (*wl).weights[i-1] = field[1];
                    (*wl).hist[i-1] = field[2];
                    (*wl).length[0]++;
                    i++;
                } else if (fields == 4) {
                    if (i == 1) {
                        (*wl).minorder[0] = field[0];
                        (*wl).minorder[1] = field[1];
                    }
                    if ( (*wl).minorder[1] == field[1] )
                        (*wl).length[0]++;
                    (*wl).weights[i-1] = field[2];
                    (*wl).hist[i-1] = field[3];
                    i++;
                } else {
                    fprintf(stderr, "ERROR: Could not read order parameter at line %ld.\n\n", i);
                    wlend(wl);
                    return 1;
                }
            }
        }
    }
    if (fields == 4) {
        (*wl).length[1] = length / (*wl).length[0];
        (*wl).dorder[1] = (field[1] - (*wl).minorder[1]) / ((*wl).length[1] - 1);
    }
    (*wl).dorder[0] = (field[0] - (*wl).minorder[0]) / ((*wl).length[0] - 1);
    if ( ((i-1) != (*wl).length[0]) && (fields == 3) ) {
        fprintf(stderr, "ERROR: In reading order parameters, length %ld does not fit the number of lines %ld.\n\n", (*wl).length[0], i-1);
        wlend(wl);
        return 1;
    }
    if ( ((i-1) != (*wl).length[0]*(*wl).length[1]) && (fields == 4) ) {
        fprintf(stderr, "ERROR: In reading order parameters, lengths %ld %ld do not fit the number of lines %ld.\n\n", (*wl).length[0], (*wl).length[1], i-1);
        wlend(wl);
        return 1;
    }
    /*DEBUG*/
    printf("Wang-Landau method init:\n");
    printf("alpha: %f\n", (*wl).alpha);
    /*int j=0;
    if ((*wl).length[1] == 0) {
        for (i=0; i<(*wl).length[0]; i++) {
            printf ("%15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]);
        }
    } else {
        for (j=0; j<(*wl).length[1]; j++) {
            for (i=0; i<(*wl).length[0]; i++) {
                printf ("%15.8le %15.8le %15.8le %ld \n",(*wl).minorder[0] + i * (*wl).dorder[0], (*wl).minorder[1]+j*(*wl).dorder[1], (*wl).weights[i+(*wl).length[0]*j], (*wl).hist[i+(*wl).length[0]*j]);
            }
            printf (" \n");
        }
    }*/
    fclose(infile);
    fflush(stdout);
    return 0;
}

int wlwrite(struct wls *wl, char filename[30]) {
    long i, j;
    FILE *outfile;

    outfile = fopen(filename, "w");
    if (outfile == NULL) {
        fprintf(stderr, "\nERROR: Could not open %s file.\n\n", filename);
        return 1;
    }
    fprintf(outfile, "%15.8le \n", (*wl).alpha);
    if ((*wl).length[1] == 0) {
        for (i = 0; i < (*wl).length[0]; i++) {
            fprintf(outfile, "%15.8le %15.8le %ld \n", (*wl).minorder[0] + i * (*wl).dorder[0], (*wl).weights[i], (*wl).hist[i]);
        }
    } else {
        for (j = 0; j < (*wl).length[1]; j++) {
            for (i = 0; i < (*wl).length[0]; i++) {
                fprintf(outfile, "%15.8le %15.8le %15.8le %ld \n", (*wl).minorder[0] + i * (*wl).dorder[0],
                        (*wl).minorder[1] + j * (*wl).dorder[1], (*wl).weights[i + (*wl).length[0]*j], (*wl).hist[i + (*wl).length[0]*j]);
            }
            fprintf(outfile, " \n");
        }
    }
    fflush(outfile);
    fclose(outfile);
    return 0;
}

int wlend(struct wls *wl) {
    free((*wl).weights);
    free((*wl).hist);
    return 0;
}
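/* Illustrative note (added; not in the original code): the file consumed by
   wlinit() and produced by wlwrite() is assumed to look like the sketch
   below -- alpha alone on the first line, then one line per bin with either
   "order weight histogram" (one order parameter, three columns) or
   "order1 order2 weight histogram" (two order parameters, four columns):

       0.0001
       1.0   0.00   0
       2.0  -0.05  12
       ...

   Since wlwrite() emits the same layout, its output can be fed back in to
   continue a run. */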
void wlreject(struct sim *sim, long oldlength) {
    int mesh_cpy(struct meshs *, struct meshs *);
    int longarray_cpy(long **target, long **source, long, long);

    if ( sim->wlm[0] > 0 ) {
        sim->wl.weights[sim->wl.currorder[0] + sim->wl.currorder[1]*sim->wl.length[0]] -= sim->wl.alpha;
        sim->wl.hist[sim->wl.currorder[0] + sim->wl.currorder[1]*sim->wl.length[0]]++;
        if ( (sim->wlm[0] == 2) || (sim->wlm[1] == 2) )
            mesh_cpy(&sim->wl.mesh, &sim->wl.origmesh);
        if ( (sim->wlm[0] == 5) || (sim->wlm[1] == 5) || (sim->wlm[0] == 6) || (sim->wlm[1] == 6) ) {
            longarray_cpy(&sim->wl.radiushole, &sim->wl.radiusholeold, sim->wl.radiusholemax, oldlength);
            sim->wl.radiusholemax = oldlength;
        }
        sim->wl.partincontact = sim->wl.partincontactold;
    }
}

void wlaccept(int wlm, struct wls *wl) {
    int i;
    if ( wlm > 0 ) {
        for (i = 0; i < 2; i++)
            (*wl).currorder[i] = (*wl).neworder[i];
        (*wl).weights[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0] ] -= (*wl).alpha;
        (*wl).hist[ (*wl).currorder[0] + (*wl).currorder[1] * (*wl).length[0] ]++;
    }
}
/*..............................................................................*/
/*........................NEMATIC ORDER.........................................*/
/*..............................................................................*/

/* Calculates the instantaneous value of the nematic order parameter for the
   specified configuration. The nematic director is determined by
   diagonalisation of the tensor order parameter Q (see Allen & Tildesley
   p305). The order parameter is the corresponding eigenvalue. However, it is
   equivalent to take minus two times the middle eigenvalue (see Eppenga &
   Frenkel, Mol Phys vol. 52, p.1303-1334 [1984]), and this is more reliable
   when comparing with the isotropic phase. That is the approach taken in this
   implementation. Routines from Numerical Recipes are used to perform the
   diagonalisation. Note that these routines expect an n*n matrix to be stored
   in elements [1...n][1...n], rather than [0...n-1][0...n-1], so the arrays
   must be declared with one more element in each dimension. */
double nematic(long npart, struct particles *p) {
    double q[4][4] = {{0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0},
                      {0.0, 0.0, 0.0, 0.0}};
    double d[4], e[4];
    long i;
    void tred2(double [4][4], double [4], double [4]);
    void tqli(double [4], double [4]);

    for (i = 0; i < npart; i++) {
        q[1][1] += p[i].dir.x * p[i].dir.x;
        q[1][2] += p[i].dir.x * p[i].dir.y;
        q[1][3] += p[i].dir.x * p[i].dir.z;
        q[2][1] += p[i].dir.y * p[i].dir.x;
        q[2][2] += p[i].dir.y * p[i].dir.y;
        q[2][3] += p[i].dir.y * p[i].dir.z;
        q[3][1] += p[i].dir.z * p[i].dir.x;
        q[3][2] += p[i].dir.z * p[i].dir.y;
        q[3][3] += p[i].dir.z * p[i].dir.z;
    }
    q[1][1] = (q[1][1] * 3.0 / npart - 1.0) / 2.0;
    q[1][2] = (q[1][2] * 3.0 / npart      ) / 2.0;
    q[1][3] = (q[1][3] * 3.0 / npart      ) / 2.0;
    q[2][1] = (q[2][1] * 3.0 / npart      ) / 2.0;
    q[2][2] = (q[2][2] * 3.0 / npart - 1.0) / 2.0;
    q[2][3] = (q[2][3] * 3.0 / npart      ) / 2.0;
    q[3][1] = (q[3][1] * 3.0 / npart      ) / 2.0;
    q[3][2] = (q[3][2] * 3.0 / npart      ) / 2.0;
    q[3][3] = (q[3][3] * 3.0 / npart - 1.0) / 2.0;

    tred2(q, d, e);
    tqli(d, e);
    /* Sort the eigenvalues (d[0] serves as swap space) */
    if (d[1] > d[2]) { d[0] = d[1]; d[1] = d[2]; d[2] = d[0]; }
    if (d[2] > d[3]) { d[0] = d[2]; d[2] = d[3]; d[3] = d[0]; }
    if (d[1] > d[2]) { d[0] = d[1]; d[1] = d[2]; d[2] = d[0]; }
    return -2.0 * d[2];
}
/*..............................................................................*/

/* Returns the coefficient of the Fourier series term with period boxlength/n
   in the z direction. The coefficients of the sine and cosine terms are added
   in quadrature and returned, making the result independent of phase shifts
   in the z direction. A significantly non-zero value indicates layering of
   the particles in the z direction with periodicity boxlength/n. */
double smectic(long npart, struct particles *p, long n) {
    double a, b;
    double omega = 8.0 * n * atan(1.0);
    long i;

    a = b = 0.0;
    for (i = 0; i < npart; i++) {
        a += cos(omega * p[i].pos.z);
        b += sin(omega * p[i].pos.z);
    }
    a /= (double)npart;
    b /= (double)npart;
    return sqrt(a*a + b*b);
}
/*..............................................................................*/
/*........................Z ORDER PARAMETER.....................................*/

long z_order(struct wls *wl, struct conf * conf, int wli) {
    /* Because older C compilers do not know lround, we can use ceil as well:
       return lround(particle[0].pos.z * box.z / wl.dorder[wli] - wl.minorder[wli]); */
    return (long) ceil( ((conf->particle[0].pos.z - conf->syscm.z) * conf->box.z - wl->minorder[wli]) / wl->dorder[wli] );
}
/*..............................................................................*/
/*........................TWO-PARTICLE DISTANCE.................................*/

long twopartdist(struct wls *wl, struct conf * conf, int wli) {
    struct vector r_cm;

    r_cm.x = conf->particle[0].pos.x - conf->particle[1].pos.x;
    r_cm.y = conf->particle[0].pos.y - conf->particle[1].pos.y;
    r_cm.z = conf->particle[0].pos.z - conf->particle[1].pos.z;
    if ( r_cm.x < 0 )
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x - 0.5) ));
    else
        r_cm.x = conf->box.x * (r_cm.x - (double)( (long)(r_cm.x + 0.5) ));
    if ( r_cm.y < 0 )
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y - 0.5) ));
    else
        r_cm.y = conf->box.y * (r_cm.y - (double)( (long)(r_cm.y + 0.5) ));
    if ( r_cm.z < 0 )
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z - 0.5) ));
    else
        r_cm.z = conf->box.z * (r_cm.z - (double)( (long)(r_cm.z + 0.5) ));
    return (long) ceil( (sqrt(r_cm.x*r_cm.x + r_cm.y*r_cm.y) - wl->minorder[wli]) / wl->dorder[wli] );
}
/*..............................................................................*/
/*........................ALIGNMENT ORDER PARAMETER.............................*/

double alignment_order(struct conf * conf, struct topo * topo) {
    double sumdot = 0;
    long i, j;
    struct vector r_cm;
    struct vector image(struct vector, struct vector, struct vector);

    for (i = 0; i < topo->npart - 1; i++) {
        for (j = i + 1; j < topo->npart; j++) {
            r_cm = image(conf->particle[i].pos, conf->particle[j].pos, conf->box);
            if ( DOT(r_cm, r_cm) < 1.5*1.5 ) {
                sumdot += DOT(conf->particle[i].dir, conf->particle[j].dir);
            }
        }
    }
    return sumdot;
}
/*..............................................................................*/
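/* Illustrative sketch (added; not in the original code): z_order(),
   twopartdist() above and radiushole_position() below all map a continuous
   value onto a Wang-Landau bin with the same rule. The helper name wl_bin
   is hypothetical and the block is guarded so it does not enter the build. */
#ifdef WL_BIN_EXAMPLE
static long wl_bin(double value, double minorder, double dorder) {
    return (long) ceil((value - minorder) / dorder);
}
#endif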
/*........................HOLE IN MESH-MEMBRANE ORDER PARAM.....................*/

/* Return the change in the order parameter when one particle moves. */
long meshorder_moveone(struct vector oldpos, struct vector newpos, struct meshs *mesh, long npart, long target, struct conf * conf, struct sim * sim, int wli) {
    int change;
    int nx, ny, ox, oy; /* position in the mesh */
    double resid;
    void mesh_fill(struct meshs *, long, struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int mesh_removepart(double, double, int **, int [2]);

    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    nx = (int) (INBOX(newpos.x, resid) * (*mesh).dim[0]);
    ny = (int) (INBOX(newpos.y, resid) * (*mesh).dim[1]);
    ox = (int) (INBOX(oldpos.x, resid) * (*mesh).dim[0]);
    oy = (int) (INBOX(oldpos.y, resid) * (*mesh).dim[1]);
    if ( (nx == ox) && (ny == oy) )
        return sim->wl.currorder[wli]; /* the particle stayed in the same mesh bin */

    change = mesh_addpart(newpos.x, newpos.y, &(*mesh).data, (*mesh).dim);
    if (change) {
        change = mesh_removepart(oldpos.x, oldpos.y, &(*mesh).data, (*mesh).dim);
    }
    if ( !change ) {
        /* fill the mesh with particles */
        mesh_fill(mesh, npart, conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}

/* Return the change in the order parameter when a chain moves. */
long meshorder_movechain(long chain[MAXN], struct meshs *mesh, long npart, struct conf * conf, struct sim * sim, struct particles chorig[MAXCHL], int wli) {
    long i, current;
    int change;
    void mesh_fill(struct meshs *, long, struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);
    int mesh_addpart(double, double, int **, int [2]);
    int mesh_removepart(double, double, int **, int [2]);

    change = 1;
    i = 0;
    current = chain[0];
    while ( (current >= 0) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype )
            change = mesh_addpart(conf->particle[current].pos.x, conf->particle[current].pos.y, &(*mesh).data, (*mesh).dim);
        i++;
        current = chain[i];
    }
    i = 0;
    current = chain[0];
    while ( (current >= 0) && (change) ) {
        if ( conf->particle[current].type == sim->wl.wlmtype )
            change = mesh_removepart(chorig[i].pos.x, chorig[i].pos.y, &(*mesh).data, (*mesh).dim);
        i++;
        current = chain[i];
    }
    if ( !change ) {
        /* fill the mesh with particles */
        mesh_fill(mesh, npart, conf->particle, sim);
        return (long) (mesh_findholes(mesh) - sim->wl.minorder[wli]);
    }
    return sim->wl.currorder[wli];
}

/* Fill the mesh with particles. */
void mesh_fill(struct meshs *mesh, long npart, struct particles *particle, struct sim * sim) {
    long i;
    int mesh_addpart(double posx, double posy, int **mesh, int dim[2]);

    for (i = 0; i < ((*mesh).dim[0] * (*mesh).dim[1]); i++) {
        (*mesh).data[i] = 0;
    }
    for (i = 0; i < npart; i++) {
        /* calculate the position of the particle on the mesh and add it everywhere it belongs */
        if (particle[i].type == sim->wl.wlmtype)
            mesh_addpart(particle[i].pos.x, particle[i].pos.y, &(*mesh).data, (*mesh).dim);
    }
}

/* Add a particle at coordinates (posx, posy) to the mesh;
   return 0 if it was placed on an empty spot. */
int mesh_addpart(double posx, double posy, int **mesh, int dim[2]) {
    int i, square[9], onhole;
    double resid;
    void mesh_square(int, int, int [2], int (*)[9]);

    onhole = 1;
    mesh_square( (int) (INBOX(posx, resid) * dim[0]), (int) (INBOX(posy, resid) * dim[1]), dim, &square);
    for (i = 0; i < 9; i++) {
        if ( (square[i] >= dim[0]*dim[1]) || (square[i] < 0) ) {
            printf("Error: trying to write to %d\n", square[i]);
            printf("%d %d and %d\n", (int) (INBOX(posx, resid) * dim[0]), (int) (INBOX(posy, resid) * dim[1]), i);
            fflush(stdout);
        }
        if ( ((*mesh)[ square[i] ]) >= 0 )
            onhole = 0;
        (*mesh)[ square[i] ]--;
    }
    return onhole;
}

/* Remove a particle at coordinates (posx, posy) from the mesh;
   return 0 if there is an empty spot now. */
int mesh_removepart(double posx, double posy, int **mesh, int dim[2]) {
    int i, square[9];
    double resid;
    void mesh_square(int, int, int [2], int (*)[9]);

    mesh_square((int) (INBOX(posx, resid) * dim[0]), (int) (INBOX(posy, resid) * dim[1]), dim, &square);
    for (i = 0; i < 9; i++) {
        //DEBUG if (square[i] >= dim[0]*dim[1]) printf ("Error: trying to write to %d\n",square[i]);
        (*mesh)[ square[i] ]++;
        if ( ((*mesh)[ square[i] ]) == 0 )
            return 0;
    }
    return 1;
}

/* Collect the indices of the 3x3 neighbourhood of (x, y), with periodic wrap. */
void mesh_square(int x, int y, int dim[2], int (*square)[9]) {
    int a, b;
    b = y;
    (*square)[0] = x + dim[0]*b;
    a = x-1; if ( a<0 ) a = dim[0]-1;
    (*square)[1] = a + dim[0]*b;
    a = x+1; if ( a==dim[0] ) a = 0;
    (*square)[2] = a + dim[0]*b;
    b = y-1; if ( b<0 ) b = dim[1]-1;
    (*square)[3] = x + dim[0]*b;
    a = x-1; if ( a<0 ) a = dim[0]-1;
    (*square)[4] = a + dim[0]*b;
    a = x+1; if ( a==dim[0] ) a = 0;
    (*square)[5] = a + dim[0]*b;
    b = y+1; if ( b==dim[1] ) b = 0;
    (*square)[6] = x + dim[0]*b;
    a = x-1; if ( a<0 ) a = dim[0]-1;
    (*square)[7] = a + dim[0]*b;
    a = x+1; if ( a==dim[0] ) a = 0;
    (*square)[8] = a + dim[0]*b;
}

/* Collect the indices of the four nearest neighbours of pos, with periodic wrap. */
void mesh_neighbors(int pos, int dim[2], int neighbors[4]) {
    int x, y, a;
    x = pos % dim[0];
    y = pos / dim[0];
    a = x-1; if ( a<0 ) a = dim[0]-1;
    neighbors[0] = a + dim[0]*y;
    a = x+1; if ( a==dim[0] ) a = 0;
    neighbors[1] = a + dim[0]*y;
    a = y-1; if ( a<0 ) a = dim[1]-1;
    neighbors[2] = x + dim[0]*a;
    a = y+1; if ( a==dim[1] ) a = 0;
    neighbors[3] = x + dim[0]*a;
}

/* Flood-fill the free mesh points into clusters (holes);
   returns the size of the largest hole. */
int mesh_findholes(struct meshs *mesh) {
    int i, j, k, n, size, li, maxsize;
    int neighbors[4];
    void mesh_neighbors(int, int [2], int [4]);

    n = 0;
    maxsize = 0;
    for (i = 0; i < ((*mesh).dim[0] * (*mesh).dim[1]); i++) {
        (*mesh).tmp[i] = 0;
        if ( (*mesh).data[i] > 0 )
            (*mesh).data[i] = 0;
    }
    i = 0;
    // go through all mesh points
    while ( i < ((*mesh).dim[0] * (*mesh).dim[1]) ) {
        // test if the mesh point is occupied
        if ( (*mesh).data[i] != 0 ) {
            i++;
        } else {
            // the mesh point is free, create a new cluster
            n++;
            (*mesh).data[i] = n;
            // start a new cluster: put the mesh point in as the first element and set the list pointer on it
            //DEBUG if (n >= mesh.dim[0]*mesh.dim[1]) printf ("Error: trying to write to sizes position %d\n",n);
            size = 1;
            (*mesh).tmp[0] = i;
            li = 0;
            // go through all elements of the cluster
            while ( li < size ) {
                // go through all neighbors
                j = (*mesh).tmp[li];
                mesh_neighbors(j, (*mesh).dim, neighbors);
                for ( k = 0; k < 4; k++ ) {
                    // test if the status is free and append it to the cluster
                    if ( (*mesh).data[ neighbors[k] ] == 0 ) {
                        (*mesh).data[ neighbors[k] ] = n;
                        // append the mesh point as an element in the list
                        (*mesh).tmp[size] = neighbors[k];
                        size++;
                    }
                    if ( (*mesh).data[ neighbors[k] ] > 0 && (*mesh).data[ neighbors[k] ] < n ) {
                        fprintf(stderr, "Error: Mesh cluster out of range, probably looping infinitely through the periodic boundaries.");
                        fflush(stderr);
                    }
                }
                li++;
            }
            if (size > maxsize)
                maxsize = size;
        }
    }
    return maxsize;
}

int mesh_init(struct meshs *mesh, double meshsize, long npart, struct conf * conf, struct sim * sim) {
    // int i;
    int maxsize, length;
    void mesh_fill(struct meshs *, long, struct particles *, struct sim * sim);
    int mesh_findholes(struct meshs *);

    (*mesh).dim[0] = (int)(conf->box.x / meshsize);
    (*mesh).dim[1] = (int)(conf->box.y / meshsize);
    if ( (*mesh).data != NULL ) free((*mesh).data);
    if ( (*mesh).tmp != NULL ) free((*mesh).tmp);
    length = (*mesh).dim[0] * (*mesh).dim[1];
    (*mesh).data = malloc(sizeof(int) * (length));
    (*mesh).tmp = malloc(sizeof(int) * (length+1));

    /* fill the mesh with particles */
    mesh_fill(mesh, npart, conf->particle, sim);
    /* perform the hole cluster algorithm */
    maxsize = mesh_findholes(mesh);
    /*DEBUG
    printf("maxsize: %d\n",maxsize);
    printf("mesh:\n");
    for (i=0;i<mesh.dim[0]*mesh.dim[1];i++) {
        printf("%d ",mesh.data[i]);
        if ( ((i+1) % mesh.dim[0]) == 0) printf("\n");
    }*/
    return maxsize;
}
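/* Illustrative sketch (added; not in the original code): the mesh is stored
   row-major, index = x + dim[0]*y, which is the mapping mesh_square() and
   mesh_neighbors() above rely on. The inverse helper below is hypothetical
   and the block is guarded so it does not enter the build. */
#ifdef MESH_INDEX_EXAMPLE
static void example_mesh_xy(int pos, int dim[2], int *x, int *y) {
    *x = pos % dim[0]; /* column */
    *y = pos / dim[0]; /* row */
}
#endif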
void mesh_print(struct meshs *mesh) {
    int i;
    int mesh_findholes(struct meshs *);

    printf("mesh:\n");
    for (i = 0; i < (*mesh).dim[0] * (*mesh).dim[1]; i++) {
        printf("%d ", (*mesh).data[i]);
        if ( ((i+1) % (*mesh).dim[0]) == 0 )
            printf("\n");
    }
    printf("hole %d:\n", mesh_findholes(mesh));
    printf("\n");
}

int mesh_cpy(struct meshs *target, struct meshs *source) {
    if ( (*target).data != NULL ) {
        if ( ((*target).dim[0] == (*source).dim[0]) && ((*target).dim[1] == (*source).dim[1]) ) {
            memcpy((*target).data, (*source).data, sizeof(int) * ((*target).dim[0] * (*target).dim[1]));
            return 0;
        } else {
            free((*target).data);
            if ( (*source).dim[0] * (*source).dim[1] > (*target).dim[0] * (*target).dim[1] ) {
                if ((*target).tmp != NULL) free((*target).tmp);
                (*target).tmp = malloc(sizeof(int) * ((*source).dim[0] * (*source).dim[1] + 1));
            }
        }
    }
    (*target).dim[0] = (*source).dim[0];
    (*target).dim[1] = (*source).dim[1];
    (*target).data = malloc(sizeof(int) * ((*target).dim[0] * (*target).dim[1]));
    if ((*target).tmp == NULL)
        (*target).tmp = malloc(sizeof(int) * ((*source).dim[0] * (*source).dim[1] + 1));
    memcpy((*target).data, (*source).data, sizeof(int) * ((*target).dim[0] * (*target).dim[1]));
    return 0;
}

int mesh_end(struct meshs *mesh) {
    /* free the allocated memory */
    if ( (*mesh).data != NULL ) free((*mesh).data);
    if ( (*mesh).tmp != NULL ) free((*mesh).tmp);
    return 0;
}
/*..............................................................................*/
/*........................RADIUS HOLE IN CENTER MEMBRANE ORDER PARAM............*/

/* Return the current bin of the free radius. */
long radiushole_order(struct sim * sim) {
    long i;
    for (i = 0; i < sim->wl.radiusholemax - 3; i++) {
        if ((sim->wl.radiushole[i] > 0) && (sim->wl.radiushole[i+1] > 0) &&
            (sim->wl.radiushole[i+2] > 0) && (sim->wl.radiushole[i+3] > 0))
            return i-1;
    }
    return -100;
}

/* Return the order (bin) of a given radius. */
long radiushole_position(double radius, struct sim * sim, int wli) {
    return (long) ceil( (radius - sim->wl.minorder[wli]) / sim->wl.dorder[wli] );
}

/* Return the change in the order parameter when one particle moves. */
long radiusholeorder_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli, struct vector *position) {
    long nr, or; /* position in radiushole */
    double rx, ry, z;
    BOOL oz, nz;
    long radiushole_position(double radius, struct sim * sim, int);
    long radiushole_order(struct sim *sim);
    double anint(double);
    void radiushole_print(long *radiushole, long length);

    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    z = conf->particle[target].pos.z - position->z; /* is the new position above the given position? */
    if (z - anint(z) < 0) nz = FALSE;
    else nz = TRUE;
    z = oldpos->z - position->z; /* was the old position above the given position? */
    if (z - anint(z) < 0) oz = FALSE;
    else oz = TRUE;
    if ( !(nz) && !(oz) )
        return sim->wl.currorder[wli];

    rx = conf->box.x * (conf->particle[target].pos.x - anint(conf->particle[target].pos.x));
    ry = conf->box.y * (conf->particle[target].pos.y - anint(conf->particle[target].pos.y));
    nr = radiushole_position(sqrt(rx*rx + ry*ry), sim, wli);
    if (nr < 0)
        return -100;
    /* the particle moved over radius bins */
    if (nz) {
        sim->wl.radiushole[nr]++;
    }
    if (oz) {
        rx = conf->box.x * (oldpos->x - anint(oldpos->x));
        ry = conf->box.y * (oldpos->y - anint(oldpos->y));
        or = radiushole_position(sqrt(rx*rx + ry*ry), sim, wli);
        sim->wl.radiushole[or]--;
        if ( sim->wl.radiushole[or] < 0 ) {
            printf("Error (single particle move): trying to make the number of beads in the radius pore smaller than 0 at position %ld\n", or);
            radiushole_print(sim->wl.radiushole, sim->wl.radiusholemax);
            fflush(stdout);
        }
        if (sim->wl.radiushole[or] == 0)
            return radiushole_order(sim);
    }
    if ( (nz) && (sim->wl.radiushole[nr] == 1) ) {
        return radiushole_order(sim);
    }
    return sim->wl.currorder[wli];
}

/* Return the change in the order parameter when a chain moves. */
long radiusholeorder_movechain(long chain[MAXN], struct conf * conf, struct sim * sim, struct particles chorig[MAXCHL], int wli, struct vector *position) {
    long i, current, nr;
    double rx, ry, z;
    BOOL change = FALSE;
    long radiushole_position(double radius, struct sim * sim, int);
    long radiushole_order(struct sim *sim);
    double anint(double);
    void radiushole_print(long *radiushole, long length);

    i = 0;
    rx = 0;
    current = chain[0];
    while (current >= 0) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            z = conf->particle[current].pos.z - position->z; /* if above the system CM */
            if (z - anint(z) > 0) {
                rx = conf->box.x * (conf->particle[current].pos.x - anint(conf->particle[current].pos.x));
                ry = conf->box.y * (conf->particle[current].pos.y - anint(conf->particle[current].pos.y));
                nr = radiushole_position(sqrt(rx*rx + ry*ry), sim, wli);
                if (nr < 0)
                    return -100;
                sim->wl.radiushole[nr]++;
                if ( sim->wl.radiushole[nr] == 1 ) change = TRUE;
            }
        }
        i++;
        current = chain[i];
    }
    i = 0;
    current = chain[0];
    while (current >= 0) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            z = chorig[i].pos.z - position->z; /* if above the system CM */
            if (z - anint(z) > 0) {
                rx = conf->box.x * (chorig[i].pos.x - anint(chorig[i].pos.x));
                ry = conf->box.y * (chorig[i].pos.y - anint(chorig[i].pos.y));
                nr = radiushole_position(sqrt(rx*rx + ry*ry), sim, wli);
                sim->wl.radiushole[nr]--;
                if ( sim->wl.radiushole[nr] < 0 ) {
                    printf("Error (chain move): trying to make the number of beads in the radius pore smaller than 0 at position %ld\n", nr);
                    radiushole_print(sim->wl.radiushole, sim->wl.radiusholemax);
                    fflush(stdout);
                }
                if ( sim->wl.radiushole[nr] == 0 ) change = TRUE;
            }
        }
        i++;
        current = chain[i];
    }
    if ( change ) {
        return radiushole_order(sim);
    }
    return sim->wl.currorder[wli];
}

/* Fill the radiushole histogram for all particles above the given position. */
long radiushole_all(struct topo *topo, struct conf *conf, struct sim * sim, int wli, struct vector *position) {
    long i, nr, radiusholemax;
    double rx, ry, z;
    long radiushole_position(double radius, struct sim * sim, int);
    long radiushole_order(struct sim *sim);
    double anint(double);

    radiusholemax = radiushole_position(sqrt(conf->box.x*conf->box.x + conf->box.y*conf->box.y), sim, wli);
    if ( radiusholemax > sim->wl.radiusholemax ) {
        if (sim->wl.radiushole != NULL)
            free(sim->wl.radiushole);
        sim->wl.radiushole = malloc(sizeof(long) * (radiusholemax));
        sim->wl.radiusholemax = radiusholemax;
    }
    for (i = 0; i < radiusholemax; i++) {
        sim->wl.radiushole[i] = 0;
    }
    for (i = 0; i < topo->npart; i++) {
        /* calculate the distance of the particle from the z axis, and add it to the array */
        if ( conf->particle[i].type == sim->wl.wlmtype ) {
            z = conf->particle[i].pos.z - (*position).z; /* if above the position */
            if (z - anint(z) > 0) {
                rx = conf->box.x * (conf->particle[i].pos.x - anint(conf->particle[i].pos.x));
                ry = conf->box.y * (conf->particle[i].pos.y - anint(conf->particle[i].pos.y));
                nr = radiushole_position(sqrt(rx*rx + ry*ry), sim, wli);
                if (nr < 0)
                    return -100;
                sim->wl.radiushole[nr]++;
            }
        }
    }
    return radiushole_order(sim);
}

void radiushole_print(long *radiushole, long length) {
    long i;
    printf("radiushole:\n");
    for (i = 0; i < length; i++) {
        printf("%ld ", radiushole[i]);
    }
    printf("\n");
}
int longarray_cpy(long **target, long **source, long targetlength, long sourcelength) {
    /*if ( (*target) != NULL) {
        if ( targetlength == sourcelength ) {
            memcpy((*target),(*source), sizeof(long)*(sourcelength));
            return 0;
        } else {
            free(*target);
        }
    }*/
    if ( (*target) != NULL )
        (*target) = (long*) realloc((*target), sizeof(long) * (sourcelength));
    else
        (*target) = malloc(sizeof(long) * (sourcelength));
    memcpy((*target), (*source), sizeof(long) * (sourcelength));
    return 0;
}
/*..............................................................................*/
/*.............................PARTICLES IN CONTACT.............................*/

/* Return the order (bin) for the number of particles in contact. */
long contparticles_order(struct sim * sim, int wli) {
    return (long) ceil( (sim->wl.partincontact - sim->wl.minorder[wli]) / sim->wl.dorder[wli] );
}

/* Returns whether the particle is in contact. */
BOOL particleinncontact(struct vector *vec, struct conf *conf) {
    double x, y, z;
    double anint(double);

    x = vec->x - conf->particle[0].pos.x;
    y = vec->y - conf->particle[0].pos.y;
    z = vec->z - conf->particle[0].pos.z;
    x = conf->box.x * (x - anint(x));
    y = conf->box.y * (y - anint(y));
    z = conf->box.z * (z - anint(z));
    if ( x*x + y*y + z*z < WL_CONTACTS ) {
        return TRUE;
    } else {
        return FALSE;
    }
}

/* Return the change in the number of particles in contact when one particle moves. */
long contparticles_moveone(struct vector *oldpos, struct conf *conf, struct sim * sim, long target, int wli) {
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact(struct vector *vec, struct conf *conf);

    if ( conf->particle[target].type != sim->wl.wlmtype )
        return sim->wl.currorder[wli];
    if ( particleinncontact(&(conf->particle[target].pos), conf) )
        sim->wl.partincontact++;
    if ( particleinncontact(oldpos, conf) )
        sim->wl.partincontact--;
    return contparticles_order(sim, wli);
}

/* Return the change in the order parameter when a chain moves. */
long contparticles_movechain(long chain[MAXN], struct conf * conf, struct sim * sim, struct particles chorig[MAXCHL], int wli) {
    long i, current;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact(struct vector *vec, struct conf *conf);

    i = 0;
    current = chain[0];
    while (current >= 0) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            if ( particleinncontact(&(conf->particle[current].pos), conf) )
                sim->wl.partincontact++;
        }
        i++;
        current = chain[i];
    }
    i = 0;
    current = chain[0];
    while (current >= 0) {
        if ( conf->particle[current].type == sim->wl.wlmtype ) {
            if ( particleinncontact(&(chorig[i].pos), conf) )
                sim->wl.partincontact--;
        }
        i++;
        current = chain[i];
    }
    return contparticles_order(sim, wli);
}

/* Count all the particles in contact. */
long contparticles_all(struct topo *topo, struct conf *conf, struct sim * sim, int wli) {
    long i;
    long contparticles_order(struct sim * sim, int wli);
    BOOL particleinncontact(struct vector *vec, struct conf *conf);

    sim->wl.partincontact = 0;
    for (i = 1; i < topo->npart; i++) {
        /* calculate the position of the particle and count it if in contact */
        if ( conf->particle[i].type == sim->wl.wlmtype ) {
            if ( particleinncontact(&(conf->particle[i].pos), conf) )
                sim->wl.partincontact++;
        }
    }
    return contparticles_order(sim, wli);
}
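/* Illustrative note (added; not in the original code): particleinncontact()
   compares the squared minimum-image distance against WL_CONTACTS, so that
   macro is assumed to hold the square of the contact cutoff distance. */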
/*..............................................................................*/
/*........................GEOMETRIC STUFF.......................................*/
/*..............................................................................*/

/* Find the closest distance between two line segments and return it as a
   vector. Takes the orientations and lengths of the line segments and the
   vector connecting their centres of mass (from vec1 to vec2). */
// Copyright 2001, softSurfer (www.softsurfer.com)
// This code may be freely used and modified for any purpose
// providing that this copyright notice is included with it.
// SoftSurfer makes no warranty for this code, and cannot be held
// liable for any real or imagined damage resulting from its use.
// Users of this code must verify correctness for their application.
struct vector mindist_segments(struct vector dir1, double halfl1, struct vector dir2, double halfl2, struct vector r_cm) {
    struct vector u, v, w, vec;
    double a, b, c, d, e, D, sc, sN, sD, tc, tN, tD;
    struct vector vec_scale(struct vector, double);

    u = vec_scale(dir1, 2.0*halfl1); // S1.P1 - S1.P0
    v = vec_scale(dir2, 2.0*halfl2); // S2.P1 - S2.P0
    w.x = dir2.x*halfl2 - dir1.x*halfl1 - r_cm.x;
    w.y = dir2.y*halfl2 - dir1.y*halfl1 - r_cm.y;
    w.z = dir2.z*halfl2 - dir1.z*halfl1 - r_cm.z; // S1.P0 - S2.P0
    a = DOT(u, u);           // always >= 0
    b = DOT(u, v);
    c = DOT(v, v);           // always >= 0
    d = DOT(u, w);
    e = DOT(v, w);
    D = a*c - b*b;           // always >= 0
    sc = D; sN = D; sD = D;  // sc = sN / sD, default sD = D >= 0
    tc = D; tN = D; tD = D;  // tc = tN / tD, default tD = D >= 0

    // compute the line parameters of the two closest points
    if (D < 0.00000001) { // the lines are almost parallel
        sN = 0.0;         // force using point P0 on segment S1
        sD = 1.0;         // to prevent possible division by 0.0 later
        tN = e;
        tD = c;
    } else {              // get the closest points on the infinite lines
        sN = (b*e - c*d);
        tN = (a*e - b*d);
        if (sN < 0.0) {   // sc < 0 => the s=0 edge is visible
            sN = 0.0;
            tN = e;
            tD = c;
        } else if (sN > sD) { // sc > 1 => the s=1 edge is visible
            sN = sD;
            tN = e + b;
            tD = c;
        }
    }
    if (tN < 0.0) {       // tc < 0 => the t=0 edge is visible
        tN = 0.0;
        // recompute sc for this edge
        if (-d < 0.0) sN = 0.0;
        else if (-d > a) sN = sD;
        else {
            sN = -d;
            sD = a;
        }
    } else if (tN > tD) { // tc > 1 => the t=1 edge is visible
        tN = tD;
        // recompute sc for this edge
        if ((-d + b) < 0.0) sN = 0;
        else if ((-d + b) > a) sN = sD;
        else {
            sN = (-d + b);
            sD = a;
        }
    }
    // finally do the division to get sc and tc
    if (fabs(sN) < 0.00000001) sc = 0.0;
    else sc = sN / sD;
    if (fabs(tN) < 0.00000001) tc = 0.0;
    else tc = tN / tD;
    // get the difference of the two closest points
    // Vector = w + (sc * u) - (tc * v); // = S1(sc) - S2(tc)
    vec.x = u.x*sc + w.x - v.x*tc;
    vec.y = u.y*sc + w.y - v.y*tc;
    vec.z = u.z*sc + w.z - v.z*tc;
    return vec;
}
/*..............................................................................*/

/* Find the closest distance between a line segment and a point and return it
   as a vector (from the point to the closest point on the segment). Takes the
   orientation and length of the line segment and the vector connecting their
   centres of mass (from segment to point). */
struct vector mindist_segmentpoint(struct vector dir1, double length, struct vector r_cm) {
    struct vector vec;
    double c, d, halfl;

    halfl = length * 0.5;
    c = DOT(dir1, r_cm);
    if (c >= halfl) d = halfl;
    else {
        if (c > -halfl) d = c;
        else d = -halfl;
    }
    vec.x = -r_cm.x + dir1.x * d;
    vec.y = -r_cm.y + dir1.y * d;
    vec.z = -r_cm.z + dir1.z * d;
    return vec;
}
/*..............................................................................*/

/* Determines whether two particles overlap.
   Returns 1 if there is an overlap, 0 if not. */
int overlap(struct particles part1, struct particles part2, struct vector box, struct ia_param ia_params[MAXT][MAXT]) {
    double b, c, d, e, f;   /* Coefficients in the distance quadratic */
    double boundary;        /* Half length of the central boundary zone of the quadratic */
    double det;
    double halfl;           /* Half length of the cylinder */
    double s0, t0;          /* det times the location of the minimum separation of the infinite lines */
    double ss, tt;          /* Location of the minimum separation of the line segments */
    struct vector r_cm;     /* Vector between the centres of mass */
    double dist;            /* Distance between the particles */
    struct vector distvec;  /* Distance vector between the particles */
    double linemin(double, double);
    struct vector image(struct vector, struct vector, struct vector);

    r_cm = image(part1.pos, part2.pos, box);
    /* the original compared part1.type/part2.type against SP here; the geotype
       is what all the branches below test, so that is clearly what was meant */
    if ((ia_params[part1.type][part2.type].geotype[0] >= SP) && (ia_params[part1.type][part2.type].geotype[1] >= SP)) {
        /* we have two spheres - the most common case, do nothing special */
        dist = sqrt(DOT(r_cm, r_cm));
    } else {
        if ((ia_params[part1.type][part2.type].geotype[0] < SP) && (ia_params[part1.type][part2.type].geotype[1] < SP)) {
            /* we have two spherocylinders; find the closest contact between them */
            b = -DOT(part1.dir, part2.dir);
            d = DOT(part1.dir, r_cm);
            e = -DOT(part2.dir, r_cm);
            f = DOT(r_cm, r_cm);
            det = 1.0 - b*b;
            //halfl = length / 2.0;
            // just take the mean of the two half lengths
            // (the original read "halfl = half_len[0] = half_len[1]; halfl /= 2;",
            //  which overwrote half_len[0]; a sum was clearly intended)
            halfl = (ia_params[part1.type][part2.type].half_len[0] + ia_params[part1.type][part2.type].half_len[1]) / 2.0;
            boundary = det * halfl;
            /* Location of the smallest separation of the infinite lines */
            s0 = b*e - d;
            t0 = b*d - e;
            /* Location of the smallest separation of the line segments */
            if (s0 >= boundary) {
                if (t0 >= boundary) {
                    /* Region 2 */
                    if ( d + halfl + halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 1 */
                    ss = halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 8 */
                    if ( d + halfl - halfl*b < 0.0 ) {
                        ss = halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            } else if (s0 >= -boundary) {
                if (t0 >= boundary) {
                    /* Region 3 */
                    tt = halfl;
                    ss = linemin( -tt*b - d, halfl );
                } else if (t0 >= -boundary) {
                    /* Region 0 */
                    ss = s0/det;
                    tt = t0/det;
                } else {
                    /* Region 7 */
                    tt = -halfl;
                    ss = linemin( -tt*b - d, halfl );
                }
            } else {
                if (t0 >= boundary) {
                    /* Region 4 */
                    if ( d - halfl + halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                } else if (t0 >= -boundary) {
                    /* Region 5 */
                    ss = -halfl;
                    tt = linemin( -ss*b - e, halfl );
                } else {
                    /* Region 6 */
                    if ( d - halfl - halfl*b > 0.0 ) {
                        ss = -halfl;
                        tt = linemin( -ss*b - e, halfl );
                    } else {
                        tt = -halfl;
                        ss = linemin( -tt*b - d, halfl );
                    }
                }
            }
            /* ss and tt are the locations of the minimum separation of the line segments */
            dist = sqrt(f + ss*ss + tt*tt + 2.0*(ss*d + tt*e + ss*tt*b));
        } else {
            if (ia_params[part1.type][part2.type].geotype[0] < SP) {
                /* we have one spherocylinder - it is the first one */
                //halfl=length/2;
                /* find the closest vector from the spherocylinder to the sphere */
                halfl = ia_params[part1.type][part2.type].half_len[0];
                c = DOT(part1.dir, r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = -r_cm.x + part1.dir.x * d;
                distvec.y = -r_cm.y + part1.dir.y * d;
                distvec.z = -r_cm.z + part1.dir.z * d;
                dist = sqrt(DOT(distvec, distvec));
            } else {
                /* last option: the first one is a sphere, the second one a spherocylinder */
                //halfl=length/2;
                /* find the closest vector from the spherocylinder to the sphere */
                halfl = ia_params[part1.type][part2.type].half_len[1];
                c = DOT(part2.dir, r_cm);
                if (c >= halfl) d = halfl;
                else {
                    if (c > -halfl) d = c;
                    else d = -halfl;
                }
                distvec.x = r_cm.x - part2.dir.x * d;
                distvec.y = r_cm.y - part2.dir.y * d;
                distvec.z = r_cm.z - part2.dir.z * d;
                dist = sqrt(DOT(distvec, distvec));
            }
        }
    }
    /* Overlap exists if the smallest separation is less than the diameter of the cylinder */
    if (dist < ia_params[part1.type][part2.type].sigma*0.5) {
        return 1;
    } else {
        return 0;
    }
}
/*..............................................................................*/

double linemin(double criterion, double halfl) {
    if (criterion >= halfl) {
        return halfl;
    } else if (criterion >= -halfl) {
        return criterion;
    } else {
        return -halfl;
    }
}
/*..............................................................................*/
/*........................SOME USEFUL MATH......................................*/
/*..............................................................................*/

/* ran2 from Numerical Recipes. */
#define IM1 2147483563
#define IM2 2147483399
#define AM (1.0/IM1)
#define IMM1 (IM1-1)
#define IA1 40014
#define IA2 40692
#define IQ1 53668
#define IQ2 52774
#define IR1 12211
#define IR2 3791
#define NTAB 32
#define NDIV (1+IMM1/NTAB)
#define EPS 1.2e-7
#define RNMX (1.0-EPS)

double ran2(long *idum) {
    int j;
    long k;
    static long idum2 = 123456789;
    static long iy = 0;
    static long iv[NTAB];
    double temp;

    if (*idum <= 0) {
        if (-(*idum) < 1) *idum = 1;
        else *idum = -(*idum);
        idum2 = (*idum);
        for (j = NTAB+7; j >= 0; j--) {
            k = (*idum)/IQ1;
            *idum = IA1*(*idum - k*IQ1) - k*IR1;
            if (*idum < 0) *idum += IM1;
            if (j < NTAB) iv[j] = *idum;
        }
        iy = iv[0];
    }
    k = (*idum)/IQ1;
    *idum = IA1*(*idum - k*IQ1) - k*IR1;
    if (*idum < 0) *idum += IM1;
    k = idum2/IQ2;
    idum2 = IA2*(idum2 - k*IQ2) - k*IR2;
    if (idum2 < 0) idum2 += IM2;
    j = iy/NDIV;
    iy = iv[j] - idum2;
    iv[j] = *idum;
    if (iy < 1) iy += IMM1;
    if ((temp = AM*iy) > RNMX) return RNMX;
    else return temp;
}
#undef IM1
#undef IM2
#undef AM
#undef IMM1
#undef IA1
#undef IA2
#undef IQ1
#undef IQ2
#undef IR1
#undef IR2
#undef NTAB
#undef NDIV
#undef EPS
#undef RNMX
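/* Illustrative sketch (added; not in the original code): ran2() follows the
   Numerical Recipes convention -- seed it with a negative long once, then
   keep passing the same pointer. The names are hypothetical and the block
   is guarded so it does not enter the build. */
#ifdef RAN2_EXAMPLE
static double example_uniform(void) {
    static long myseed = -42; /* a negative value (re)initialises the stream */
    return ran2(&myseed);     /* uniform deviate in (0, 1) */
}
#endif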
*/ #define NRANSI #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) void tqli(double d[4], double e[4]) { double pythag(double a, double b); int m, l, iter, i; /* int k; */ double s, r, p, g, f, dd, c, b; for (i=2; i<=3; i++) e[i-1] = e[i]; e[3] = 0.0; for (l=1; l<=3; l++) { iter = 0; do { for (m=l; m<=3-1; m++) { dd = fabs(d[m]) + fabs(d[m+1]); if ((double)(fabs(e[m])+dd) == dd) break; } if (m != l) { if (iter++ == 30) { fprintf(stderr, "Too many iterations in tqli\n"); exit (2); } g = (d[l+1] - d[l]) / (2.0*e[l]); r = pythag(g, 1.0); g = d[m] - d[l] + e[l] / (g + SIGN(r,g)); s = c = 1.0; p = 0.0; for (i=m-1; i>=l; i--) { f = s * e[i]; b = c * e[i]; e[i+1] = (r=pythag(f,g)); if (r == 0.0) { d[i+1] -= p; e[m] = 0.0; break; } s = f/r; c = g/r; g = d[i+1] - p; r = (d[i] - g)*s + 2.0*c*b; d[i+1] = g+(p=s*r); g = c*r - b; /* for (k=1; k<=3; k++) { f = z[k][i+1]; z[k][i+1] = s*z[k][i]+c*f; z[k][i] = c*z[k][i]i - s*f; } */ } if (r == 0.0 && i >= l) continue; d[l] -= p; e[l] = g; e[m] = 0.0; } } while (m != l); } } #undef NRANSI /*..............................................................................*/ /* From Numerical Recipes. Used by tqli. */ #define NRANSI static double sqrarg; #define SQR(a) ((sqrarg=(a)) == 0.0 ? 0.0 : sqrarg*sqrarg) double pythag(double a, double b) { double absa, absb; absa = fabs(a); absb = fabs(b); if (absa > absb) return absa*sqrt(1.0+SQR(absb/absa)); else return (absb == 0.0 ? 0.0 : absb*sqrt(1.0+SQR(absa/absb))); } #undef NRANSI /*..............................................................................*/ /* Normalise a vector to have unit length. For speed during heavy use, it is not checked that the supplied vector has non-zero length. */ void normalise(struct vector *u) { double tot; tot = sqrt( DOT(*u,*u) ); if (tot !=0.0) { tot=1/tot; (*u).x *= tot; (*u).y *= tot; (*u).z *= tot; } } /* Returns the vector pointing from the centre of mass of particle 2 to the centre of mass of the closest image of particle 1. */ struct vector image(struct vector r1, struct vector r2, struct vector box) { struct vector r12; double anint(double); r12.x = r1.x - r2.x; r12.y = r1.y - r2.y; r12.z = r1.z - r2.z; r12.x = box.x * (r12.x - anint(r12.x)); r12.y = box.y * (r12.y - anint(r12.y)); r12.z = box.z * (r12.z - anint(r12.z)); return r12; } /* Returns the nearest integer to its argument as a double precision number. e.g. anint(-0.49) = 0.0 and anint(-0.51) = -1.0. Equivalent to the Fortran intrinsic ANINT. */ double anint(double arg) { if (arg < 0) { return (double)( (long)(arg-0.5) ); } else { return (double)( (long)(arg+0.5) ); } } /*..............................................................................*/ /* Returns an evenly distributed random unit vector of unit length. See Allen & Tildesley p349 or Frenkel & Smit p410. 
RANDOM VECTOR ON UNIT SPHERE */ struct vector ranvec(void) { double a, b, xi1, xi2; struct vector unit; double ran2(long *); do { xi1 = 1.0 - 2.0*ran2(&seed); xi2 = 1.0 - 2.0*ran2(&seed); a = xi1*xi1 + xi2*xi2; } while (a > 1.0); b = 2.0 * sqrt(1.0 - a); unit.x = xi1 * b; unit.y = xi2 * b; unit.z = 1.0 - 2.0*a; return unit; } /** * returns a point randomly and evenly distributed inside of a unit sphere */ struct vector ranvecsph(void) { struct vector ranvec; double ran2(long *); do{ ranvec.x = 2 * ran2(&seed) - 1.0; ranvec.y = 2 * ran2(&seed) - 1.0; ranvec.z = 2 * ran2(&seed) - 1.0; } while(ranvec.x*ranvec.x + ranvec.y*ranvec.y + ranvec.z*ranvec.z >= 1); //printf("%lf\t%lf\t%lf\n", ranvec.x,ranvec.y,ranvec.z); return ranvec; } /**** some useful math *******/ struct vector vec_create(double x, double y, double z) { struct vector newvec; newvec.x=x; newvec.y=y; newvec.z=z; return newvec; } struct vector vec_createarr(double a[3]) { struct vector newvec; newvec.x=a[0]; newvec.y=a[1]; newvec.z=a[2]; return newvec; } double vec_dotproduct(struct vector A,struct vector B) { double dp; dp = A.x*B.x + A.y*B.y + A.z*B.z; return dp; } /* vector projection of vector A to direction of B*/ struct vector vec_project(struct vector* A,struct vector* B) { double dp; struct vector pr; dp = A->x*B->x + A->y*B->y + A->z*B->z; pr.x=B->x*dp; pr.y=B->y*dp; pr.z=B->z*dp; return pr; } void ortogonalise(struct vector *A, struct vector B) { double dp; double vec_dotproduct(struct vector A,struct vector B); dp=vec_dotproduct(*A,B); (*A).x -= B.x * dp; (*A).y -= B.y * dp; (*A).z -= B.z * dp; } /* vector projection of vector A perpendicular to direction of B*/ struct vector vec_perpproject(struct vector *A,struct vector *B) { struct vector pp; double dp; struct vector vec_project(struct vector *, struct vector*); dp=DOT((*A),(*B)); pp.x = A->x - B->x*dp; pp.y = A->y - B->y*dp; pp.z = A->z - B->z*dp; // fprintf (stderr, "pp x: %.8f y: %.8f z: %.8f \n",pp.x,pp.y,pp.z); return pp; } /* returns a vector perpendicular to A nothing special about the vector except that it's one of the perpendicular options and is normalized */ struct vector vec_perp(struct vector A) { double ratio,x,y; struct vector somevector; struct vector vec_create(double, double, double); struct vector vec_normalize(struct vector); void normalise(struct vector *); struct vector vec_crossproduct(struct vector, struct vector); x=A.x; y=A.y; if (x == 0) x=1; else { if (y == 0) y=1; else { ratio=y/x; y=x*ratio*2; } } somevector= vec_create(x, y, A.z); normalise(&somevector); return vec_crossproduct(A,somevector); } /* Perform the multiplication of a matrix A and a vector B where A is the first argument and B is the second argument. 
The routine will return AxB*/ struct vector matrix_vec_multiply(double A[3][3],struct vector B) { int i; double vecarr[3]; struct vector AB,RA; struct vector vec_createarr(double[3]); double vec_dotproduct(struct vector,struct vector); for (i=0;i<3;i++) { /* index the row vector from A*/ RA=vec_createarr(A[i]); /* Now find the dot product of this row with B*/ vecarr[i]=vec_dotproduct(RA,B); } AB=vec_createarr(vecarr); return AB; } /* Distance between two vectors*/ double vec_distance(struct vector vec1,struct vector vec2) { double sum; sum= (vec1.x-vec2.x)*(vec1.x-vec2.x)+(vec1.y-vec2.y)*(vec1.y-vec2.y)+(vec1.z-vec2.z)*(vec1.z-vec2.z); return pow(sum,0.5); } /* Vector size */ double vec_size(struct vector vec) { double size; size=sqrt(vec.x*vec.x+ vec.y*vec.y+ vec.z*vec.z); return size; } /* Normalize a vector*/ struct vector vec_normalize(struct vector vec) { double mag; struct vector newvec; double vec_size(struct vector); mag= vec_size (vec); mag=1/mag; newvec.x=vec.x*mag; newvec.y=vec.y*mag; newvec.z=vec.z*mag; return newvec; } /* Scale a vector */ struct vector vec_scale(struct vector vec, double scale) { vec.x=vec.x*scale; vec.y=vec.y*scale; vec.z=vec.z*scale; return vec; } /* cross_product*/ struct vector vec_crossproduct(struct vector A,struct vector B) { struct vector cp; cp.x=( A.y*B.z - A.z*B.y); cp.y=( -A.x*B.z + A.z*B.x); cp.z=( A.x*B.y - A.y*B.x); return cp; } /* addition of vectors*/ inline struct vector vec_sum(struct vector A,struct vector B) { struct vector C; C.x=(A.x + B.x); C.y=(A.y + B.y); C.z=(A.z + B.z); return C; } /* subtraction of vectors*/ inline struct vector vec_sub(struct vector A,struct vector B) { struct vector C; C.x=(A.x - B.x); C.y=(A.y - B.y); C.z=(A.z - B.z); return C; } /* asign vlues of vector A by values in vector B*/ inline void vec_asign(struct vector *A, struct vector B) { (*A).x=B.x; (*A).y=B.y; (*A).z=B.z; } /* generate random unit vector*/ struct vector vec_random(void) { struct vector newvec; struct vector ranvec(void); newvec=ranvec(); return newvec; } /*generate random unit quaternion*/ struct quat quat_random(void) { double cosv, sinv; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ cosv = cos(PIH * ran2(&seed) ); if (ran2(&seed) <0.5) sinv = sqrt(1.0 - cosv*cosv); else sinv = -sqrt(1.0 - cosv*cosv); newquat.w=cosv; newquat.x=newaxis.x*sinv; newquat.y=newaxis.y*sinv; newquat.z=newaxis.z*sinv; return newquat; } /* Create quaternion for rotation around vector "vec" of angle in degrees "angle" function need cos of half angle and its sin*/ struct quat quat_create(struct vector vec, double vc, double vs) { struct quat newquat; newquat.w=vc; newquat.x=vec.x*vs; newquat.y=vec.y*vs; newquat.z=vec.z*vs; return newquat; } /*rotate vector with quaternion*/ void vec_rotate(struct vector *vec, struct quat quat) { double t2,t3,t4,t5,t6,t7,t8,t9,t10,newx,newy,newz; /* t1 = quat.w * quat.w; */ t2 = quat.w * quat.x; t3 = quat.w * quat.y; t4 = quat.w * quat.z; t5 = -quat.x * quat.x; t6 = quat.x * quat.y; t7 = quat.x * quat.z; t8 = -quat.y * quat.y; t9 = quat.y * quat.z; t10 = -quat.z * quat.z; newx = 2.0 * ( (t8+t10)*(*vec).x + (t6-t4)*(*vec).y + (t3+t7)*(*vec).z ) + (*vec).x; newy = 2.0 * ( (t4+t6)*(*vec).x + (t5+t10)*(*vec).y + (t9-t2)*(*vec).z ) + (*vec).y; newz = 2.0 * ( (t7-t3)*(*vec).x + (t2+t9)*(*vec).y + (t5+t8)*(*vec).z ) + (*vec).z; (*vec).x = newx; (*vec).y = newy; (*vec).z = newz; } /* rotate spherocylinder by quaternion of random axis 
and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void psc_rotate(struct particles *psc, double max_angle,int geotype) { double vc, vs, t2, t3, t4, t5, t6, t7, t8, t9, t10; double d1, d2, d3, d4, d5, d6, d7, d8, d9 , newx, newy, newz; int k,m; struct quat newquat; struct vector newaxis; struct vector ranvec(void); /* generate quaternion for rotation*/ newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); // vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; /* do quaternion rotation*/ t2 = newquat.w * newquat.x; t3 = newquat.w * newquat.y; t4 = newquat.w * newquat.z; t5 = -newquat.x * newquat.x; t6 = newquat.x * newquat.y; t7 = newquat.x * newquat.z; t8 = -newquat.y * newquat.y; t9 = newquat.y * newquat.z; t10 = -newquat.z * newquat.z; d1 = t8 + t10; d2 = t6 - t4; d3 = t3 + t7; d4 = t4 + t6; d5 = t5 + t10; d6 = t9 - t2; d7 = t7 - t3; d8 = t2 + t9; d9 = t5 + t8; /*rotate spherocylinder direction vector*/ newx = 2.0 * ( d1*psc->dir.x + d2*psc->dir.y + d3*psc->dir.z ) + psc->dir.x; newy = 2.0 * ( d4*psc->dir.x + d5*psc->dir.y + d6*psc->dir.z ) + psc->dir.y; newz = 2.0 * ( d7*psc->dir.x + d8*psc->dir.y + d9*psc->dir.z ) + psc->dir.z; psc->dir.x = newx; psc->dir.y = newy; psc->dir.z = newz; m=1; if ( (geotype != SCN) && (geotype != SCA) ) { if ( (geotype == TPSC) || (geotype == TCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate patch direction vector*/ newx = 2.0 * ( d1*psc->patchdir[k].x + d2*psc->patchdir[k].y + d3*psc->patchdir[k].z ) + psc->patchdir[k].x; newy = 2.0 * ( d4*psc->patchdir[k].x + d5*psc->patchdir[k].y + d6*psc->patchdir[k].z ) + psc->patchdir[k].y; newz = 2.0 * ( d7*psc->patchdir[k].x + d8*psc->patchdir[k].y + d9*psc->patchdir[k].z ) + psc->patchdir[k].z; psc->patchdir[k].x = newx; psc->patchdir[k].y = newy; psc->patchdir[k].z = newz; /*rotate patch sides vectors*/ newx = 2.0 * ( d1*psc->patchsides[0+2*k].x + d2*psc->patchsides[0+2*k].y + d3*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].x; newy = 2.0 * ( d4*psc->patchsides[0+2*k].x + d5*psc->patchsides[0+2*k].y + d6*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].y; newz = 2.0 * ( d7*psc->patchsides[0+2*k].x + d8*psc->patchsides[0+2*k].y + d9*psc->patchsides[0+2*k].z ) + psc->patchsides[0+2*k].z; psc->patchsides[0+2*k].x = newx; psc->patchsides[0+2*k].y = newy; psc->patchsides[0+2*k].z = newz; newx = 2.0 * ( d1*psc->patchsides[1+2*k].x + d2*psc->patchsides[1+2*k].y + d3*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].x; newy = 2.0 * ( d4*psc->patchsides[1+2*k].x + d5*psc->patchsides[1+2*k].y + d6*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].y; newz = 2.0 * ( d7*psc->patchsides[1+2*k].x + d8*psc->patchsides[1+2*k].y + d9*psc->patchsides[1+2*k].z ) + psc->patchsides[1+2*k].z; psc->patchsides[1+2*k].x = newx; psc->patchsides[1+2*k].y = newy; psc->patchsides[1+2*k].z = newz; } } m=1; if ( (geotype == CHPSC) || (geotype == CHCPSC) || (geotype == TCHPSC) || (geotype == TCHCPSC) ) { if ( (geotype == TCHPSC) || (geotype == TCHCPSC) ) m=2; for (k=0;k<m;k++) { /*rotate chiral direction vector*/ newx = 2.0 * ( d1*psc->chdir[k].x + d2*psc->chdir[k].y + d3*psc->chdir[k].z ) + 
psc->chdir[k].x; newy = 2.0 * ( d4*psc->chdir[k].x + d5*psc->chdir[k].y + d6*psc->chdir[k].z ) + psc->chdir[k].y; newz = 2.0 * ( d7*psc->chdir[k].x + d8*psc->chdir[k].y + d9*psc->chdir[k].z ) + psc->chdir[k].z; psc->chdir[k].x = newx; psc->chdir[k].y = newy; psc->chdir[k].z = newz; } } } /*returns a position of center of mass of system*/ void masscenter(long npart, struct ia_param ia_params[MAXT][MAXT], struct conf * conf) { long i; double anint(double); conf->syscm.x = 0; conf->syscm.y = 0; conf->syscm.z = 0; for (i=0; i<npart; i++) { /*using periodic boundary conditions*/ conf->syscm.x += (conf->particle[i].pos.x - anint(conf->particle[i].pos.x) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.y += (conf->particle[i].pos.y - anint(conf->particle[i].pos.y) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; conf->syscm.z += (conf->particle[i].pos.z - anint(conf->particle[i].pos.z) ) * ia_params[conf->particle[i].type][conf->particle[i].type].volume; } conf->syscm.x /= conf->sysvolume; conf->syscm.y /= conf->sysvolume; conf->syscm.z /= conf->sysvolume; return; } /* rotate cluster of particles by quaternion of random axis and angle smaller than maxcos(cosine of angle half), we do everything on site for speed */ void cluster_rotate(long target, struct vector gc, double max_angle, struct topo * topo, struct conf * conf) { long current,i; double vc,vs; //double quatsize; struct quat newquat; struct vector newaxis; struct vector ranvec(void); void vec_rotate(struct vector *, struct quat); // create rotation quaternion newaxis = ranvec(); /*random axes for rotation*/ // maxcos = cos(maxorient/2/180*PI); //vc = maxcos + ran2(&seed)*(1-maxcos); /*cos of angle must be bigger than maxcos and smaller than one*/ vc = cos(max_angle * ran2(&seed) ); if (ran2(&seed) <0.5) vs = sqrt(1.0 - vc*vc); else vs = -sqrt(1.0 - vc*vc); /*randomly choose orientation of direction of rotation clockwise or counterclockwise*/ newquat.w=vc; newquat.x=newaxis.x*vs; newquat.y=newaxis.y*vs; newquat.z=newaxis.z*vs; //quatsize=sqrt(newquat.w*newquat.w+newquat.x*newquat.x+newquat.y*newquat.y+newquat.z*newquat.z); //shift position to geometrical center i=0; current = topo->chainlist[target][0]; while (current >=0 ) { //shift position to geometrical center conf->particle[current].pos.x -= gc.x; conf->particle[current].pos.y -= gc.y; conf->particle[current].pos.z -= gc.z; //scale things by box not to have them distorted conf->particle[current].pos.x *= conf->box.x; conf->particle[current].pos.y *= conf->box.y; conf->particle[current].pos.z *= conf->box.z; //do rotation vec_rotate(&conf->particle[current].pos, newquat); vec_rotate(&conf->particle[current].dir, newquat); vec_rotate(&conf->particle[current].patchdir[0], newquat); vec_rotate(&conf->particle[current].patchdir[1], newquat); vec_rotate(&conf->particle[current].chdir[0], newquat); vec_rotate(&conf->particle[current].chdir[1], newquat); vec_rotate(&conf->particle[current].patchsides[0], newquat); vec_rotate(&conf->particle[current].patchsides[1], newquat); vec_rotate(&conf->particle[current].patchsides[2], newquat); vec_rotate(&conf->particle[current].patchsides[3], newquat); //sclae back conf->particle[current].pos.x /= conf->box.x; conf->particle[current].pos.y /= conf->box.y; conf->particle[current].pos.z /= conf->box.z; //shift positions back conf->particle[current].pos.x += gc.x; conf->particle[current].pos.y += gc.y; conf->particle[current].pos.z += gc.z; i++; current = topo->chainlist[target][i]; } } /* put the 
particle in the original box using periodic boundary conditions.
   In our system the particle positions are scaled by the box size, so to get them
   into the original box we bring them between 0 and 1 and then scale back by the
   box size. */
void origbox(struct vector *pos,struct vector box)
{
    double anint(double);

    (*pos).x = box.x * ((*pos).x - anint((*pos).x));
    (*pos).y = box.y * ((*pos).y - anint((*pos).y));
    (*pos).z = box.z * ((*pos).z - anint((*pos).z));
}

/* use of periodic boundary conditions */
void usepbc(struct vector *pos,struct vector pbc)
{
    do {
        (*pos).x += pbc.x;
    } while ((*pos).x < 0.0);
    do {
        (*pos).x -= pbc.x;
    } while ((*pos).x > pbc.x);
    do {
        (*pos).y += pbc.y;
    } while ((*pos).y < 0.0);
    do {
        (*pos).y -= pbc.y;
    } while ((*pos).y > pbc.y);
    do {
        (*pos).z += pbc.z;
    } while ((*pos).z < 0.0);
    do {
        (*pos).z -= pbc.z;
    } while ((*pos).z > pbc.z);
}

/*..............................................................................*/
/*.......................TEMPLATE FILES.........................................*/
/*..............................................................................*/

/*
# Template for the "options" file. Comments start with an '#'.
# Pressure couplings:
#   0 = anisotropic coupling, 1 = isotropic coupling, 2 = isotropic in xy z=const,
#   3 = isotropic in xy and keep volume constant
# Wang-Landau method: (with constant decrease of bias addition by a factor of 2, until less than WL_ALPHATOL)
#   0 = none, 1 = z-direction of 1st particle, 2 = hole in xy plane, 3 = z-orientation of 0th particle
#   4 = distance of first two particles, 5 = pore around z axis and above CM, 6 = pore around z axis and above 0th particle
#   7 = number of particles in contact (within distance sqrt(WL_CONTACTS))
ptype = 1             # Pressure coupling type (0 - anisotropic xyz, 1 - isotropic xyz, 2 - isotropic in xy z=const, 3 - isotropic in xy and V=const)
press = 1             # Pressure
paralpress = 1        # Parallel pressure for replica exchange
shave = 0             # Average number of volume change attempts per sweep (usually 1)
nequil = 0            # Number of equilibration sweeps
adjust = 0            # Number of equilibration sweeps between step size adjustments
nsweeps = 1000000     # Number of production sweeps
paramfrq = 1000000    # Number of sweeps between order parameter samples
report = 1000000      # Number of sweeps between statistics reports
nrepchange = 1000     # Number of sweeps between replica exchanges
movie = 100000        # Number of sweeps between movie frames (0 = no movie)
chainprob = 0.0       # Probability of chain move attempts per sweep (0.25/number of particles in chain)
transmx = 0.212       # Initial maximum displacement
rotmx = 7.5           # Initial maximum orientation change (degrees)
edge_mx = 0.0         # Initial maximum box length change
chainmmx = 0.0        # Initial maximum chain displacement
chainrmx = 0.0        # Initial maximum chain rotation change (degrees)
temper = 1.0          # Temperature in units kT/e
paraltemper = 1.5     # Temperature for parallel tempering in kT/e
wlm = 0               # Wang-Landau method
wlmtype = 0           # For which atomic type (from top.init) should the Wang-Landau method be calculated?
switchprob = 0.0016   # Probability of type switch attempts per sweep
pairlist_update = 8   # Number of sweeps after which the pairlist should be updated
seed = 1              # Random number seed
write_cluster = 10000 # Number of sweeps per writing out cluster info
# End of the file
*/

/* Example of a 'Config.init' file; note that you must delete the comments -
   the configuration file contains only numbers.
#box
10.0 10.0 10.0
#particles (x,y,z) (direction_x, direction_y, direction_z) (patchdirection_x, patchdirection_y, patchdirection_z) (switched)
*/

/* Template for the topology file 'top.init'.
   ( "\\" is the symbol for line continuation, "#" is the symbol for a comment,
     "[" is the starting sign for a keyword, "]" is the ending sign for a keyword )
   There are three keywords - types, molecules, and system - and they should be
   given in this order.

TYPES:
spherocylinders
  SC   - purely repulsive spherocylinder with WCA potential on the closest distance
  SCA  - attractive cos^2 potential acting isotropically, dependent only on the closest distance between spherocylinders
  PSC  - attractive potential limited to an angular wedge on the spherocylinder. The patch goes all the way through, making the hemispherical end caps attractive as well
  CPSC - attractive potential limited to an angular wedge on the cylindrical part of the spherocylinder. The hemispherical end caps are repulsive spheres
  (T)(CH)PSC - T adds a second patch, CH adds chirality
spheres
  SP   - purely repulsive sphere with WCA potential on the closest distance
  SPA  - attractive cos^2 potential acting isotropically, dependent only on the closest distance between objects

[Types]
# NAME NUMBER GEOTYPE EPSILON SIGMA ATTRACTION_DIST ATTRACTION_SWITCH PATCH_ANGLE PATCH_SWITCH SC_LENGTH (Optional second patch: PATCH_ROTATION PATCH_ANGLE PATCH_SWITCH) CHIRAL_ANGLE
Prot1 1 PSC     1 1.2 1.346954458 1.0 80.0  5.0 3
Prot2 2 PSC     1 1.2 1.346954458 1.0 170.0 5.0 3
Prot3 3 CHCPSC  1 1.2 1.346954458 1.0 170.0 5.0 3 10
Prot4 4 TCHCPSC 1 1.2 1.346954458 1.0 170.0 5.0 3 90.0 90.0 5.0 10

[Molecules]
# Molecules letter
# bond1 - harmonic bond between nearest neighbours (end points for spherocylinders) (first constant, then eq. distance)
# bond2 - harmonic bond between second nearest neighbours (their centres of mass) (first constant, then eq. distance)
# bondd - directional harmonic bond between nearest neighbours (the end point of the second spherocylinder is attached to the point of bond-length extension of the first spherocylinder) (first constant, then eq. distance)
# angle1 - angle between two spherocylinders - nearest neighbours (first constant, then eq. degrees 0-180.0)
# angle2 - angle between two spherocylinder patches - nearest neighbours (first constant, then eq. degrees 0-180.0)
# particles - types as they go along the chain in the molecule
A: {
#what: TYPE SWITCHTYPE DELTA_MU
particles: 1 2 0.5
particles: 2
}
B: {
particles: 1
particles: 2 1 0.3
}

[System]
A 2
B 2

[EXTER]
# wall interaction
# THICKNESS EPSILON ATTRACTION_SWITCH
5.0 1.0 1.0

[EXCLUDE]
# set pair types for which attraction will be excluded (the reverse pair is automatically added)
1 2
1 3
*/
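/*..............................................................................*/

/* Illustrative sketch (an added example, not part of the original program):
   the quaternion helpers above follow the half-angle convention, i.e.
   quat_create() expects the cosine and sine of HALF the rotation angle.
   Rotating a vector v by `angle` radians about `axis` therefore reads: */
struct vector rotate_about_axis(struct vector v, struct vector axis, double angle)
{
    struct quat q;
    struct quat quat_create(struct vector, double, double);
    void vec_rotate(struct vector *, struct quat);
    void normalise(struct vector *);

    normalise(&axis);                                      /* rotation axis must be a unit vector */
    q = quat_create(axis, cos(angle/2.0), sin(angle/2.0)); /* half-angle convention */
    vec_rotate(&v, q);                                     /* rotate in place */
    return v;
}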
subopt.c
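/* Usage sketch (illustration only, not part of the original subopt.c; guarded
   by #if 0 so it does not interfere with this translation unit): a separate
   program that enumerates all secondary structures within delta = 500 of the
   MFE -- delta is given in units of 0.01 kcal/mol, so this is 5.00 kcal/mol.
   Header paths assume ViennaRNA >= 2.2. */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "ViennaRNA/fold_compound.h"
#include "ViennaRNA/subopt.h"

int main(void)
{
  vrna_fold_compound_t *fc;
  SOLUTION             *sols, *s;

  fc   = vrna_fold_compound("GGGCUAUUAGCUCAGUUGGUUAGAGC", NULL, VRNA_OPTION_DEFAULT);
  sols = vrna_subopt(fc, 500, 1, NULL);  /* sorted list; printing left to the caller */

  for (s = sols; s->structure; s++) {    /* list is terminated by structure == NULL */
    printf("%s %6.2f\n", s->structure, s->energy);
    free(s->structure);
  }
  free(sols);
  vrna_fold_compound_free(fc);
  return 0;
}
#endif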
/* * suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker * * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <ctype.h> #include <string.h> #include <math.h> #include "ViennaRNA/fold.h" #include "ViennaRNA/constraints/hard.h" #include "ViennaRNA/constraints/soft.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/datastructures/lists.h" #include "ViennaRNA/eval.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/subopt.h" /* hack */ #include "ViennaRNA/color_output.inc" #ifdef _OPENMP #include <omp.h> #endif #define true 1 #define false 0 #ifndef ON_SAME_STRAND #define ON_SAME_STRAND(I, J, C) (((I) >= (C)) || ((J) < (C))) #endif /** * @brief Sequence interval stack element used in subopt.c */ typedef struct INTERVAL { int i; int j; int array_flag; } INTERVAL; typedef struct { char *structure; LIST *Intervals; int partial_energy; int is_duplex; /* int best_energy; */ /* best attainable energy */ } STATE; typedef struct { LIST *Intervals; LIST *Stack; int nopush; } subopt_env; struct old_subopt_dat { unsigned long max_sol; unsigned long n_sol; SOLUTION *SolutionList; FILE *fp; int cp; }; /* ################################# # GLOBAL VARIABLES # ################################# */ PUBLIC int subopt_sorted = 0; /* output sorted by energy */ PUBLIC int density_of_states[MAXDOS + 1]; PUBLIC double print_energy = 9999; /* printing threshold for use with logML */ /* ################################# # PRIVATE VARIABLES # ################################# */ /* some backward compatibility stuff */ PRIVATE int backward_compat = 0; PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP #pragma omp threadprivate(backward_compat_compound, backward_compat) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PRIVATE SOLUTION * wrap_subopt(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp); #endif PRIVATE void make_pair(int i, int j, STATE *state); /* mark a gquadruplex in the resulting dot-bracket structure */ PRIVATE void make_gquad(int i, int L, int l[3], STATE *state); PRIVATE INTERVAL * make_interval(int i, int j, int ml); PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length); PRIVATE STATE * copy_state(STATE *state); PRIVATE void print_state(STATE *state); PRIVATE void UNUSED print_stack(LIST *list); PRIVATE LIST * make_list(void); PRIVATE void push(LIST *list, void *data); PRIVATE void *pop(LIST *list); PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state); PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env); PRIVATE void free_interval_node(INTERVAL *node); PRIVATE void free_state_node(STATE *node); PRIVATE void push_back(LIST *Stack, STATE *state); PRIVATE char * get_structure(STATE *state); PRIVATE int compare(const void *solution1, const void *solution2); PRIVATE int compare_en(const void *solution1, const void *solution2); PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp); PRIVATE void 
repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env); PRIVATE void old_subopt_print(const char *structure, float energy, void *data); PRIVATE void old_subopt_store(const char *structure, float energy, void *data); PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void *data); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ /*---------------------------------------------------------------------------*/ /*List routines--------------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE void make_pair(int i, int j, STATE *state) { state->structure[i - 1] = '('; state->structure[j - 1] = ')'; } PRIVATE void make_gquad(int i, int L, int l[3], STATE *state) { int x; for (x = 0; x < L; x++) { state->structure[i - 1 + x] = '+'; state->structure[i - 1 + x + L + l[0]] = '+'; state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+'; state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+'; } } /*---------------------------------------------------------------------------*/ PRIVATE INTERVAL * make_interval(int i, int j, int array_flag) { INTERVAL *interval; interval = lst_newnode(sizeof(INTERVAL)); interval->i = i; interval->j = j; interval->array_flag = array_flag; return interval; } /*---------------------------------------------------------------------------*/ PRIVATE void free_interval_node(INTERVAL *node) { lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE void free_state_node(STATE *node) { free(node->structure); if (node->Intervals) lst_kill(node->Intervals, lst_freenode); lst_freenode(node); } /*---------------------------------------------------------------------------*/ PRIVATE STATE * make_state(LIST *Intervals, char *structure, int partial_energy, int is_duplex, int length) { STATE *state; state = lst_newnode(sizeof(STATE)); if (Intervals) state->Intervals = Intervals; else state->Intervals = lst_init(); if (structure) { state->structure = structure; } else { int i; state->structure = (char *)vrna_alloc(length + 1); for (i = 0; i < length; i++) state->structure[i] = '.'; } state->partial_energy = partial_energy; return state; } /*---------------------------------------------------------------------------*/ PRIVATE STATE * copy_state(STATE *state) { STATE *new_state; void *after; INTERVAL *new_interval, *next; new_state = lst_newnode(sizeof(STATE)); new_state->Intervals = lst_init(); new_state->partial_energy = state->partial_energy; /* new_state->best_energy = state->best_energy; */ if (state->Intervals->count) { after = LST_HEAD(new_state->Intervals); for (next = lst_first(state->Intervals); next; next = lst_next(next)) { new_interval = lst_newnode(sizeof(INTERVAL)); *new_interval = *next; lst_insertafter(new_state->Intervals, new_interval, after); after = new_interval; } } new_state->structure = strdup(state->structure); if (!new_state->structure) vrna_message_error("out of memory"); return new_state; } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_state(STATE *state) { INTERVAL *next; if (state->Intervals->count) { printf("%d 
intervals:\n", state->Intervals->count); for (next = lst_first(state->Intervals); next; next = lst_next(next)) printf("[%d,%d],%d ", next->i, next->j, next->array_flag); printf("\n"); } printf("partial structure: %s\n", state->structure); printf("\n"); printf(" partial_energy: %d\n", state->partial_energy); /* printf(" best_energy: %d\n", state->best_energy); */ (void)fflush(stdout); } /*---------------------------------------------------------------------------*/ /*@unused @*/ PRIVATE void print_stack(LIST *list) { void *rec; printf("================\n"); printf("%d states\n", list->count); for (rec = lst_first(list); rec; rec = lst_next(rec)) { printf("state-----------\n"); print_state(rec); } printf("================\n"); } /*---------------------------------------------------------------------------*/ PRIVATE LIST * make_list(void) { return lst_init(); } /*---------------------------------------------------------------------------*/ PRIVATE void push(LIST *list, void *data) { lst_insertafter(list, data, LST_HEAD(list)); } /* PRIVATE void */ /* push_stack(STATE *state) { */ /* keep the stack sorted by energy */ /* STATE *after, *next; */ /* nopush = false; */ /* next = after = LST_HEAD(Stack); */ /* while ( next = lst_next(next)) { */ /* if ( next->best_energy >= state->best_energy ) break; */ /* after = next; */ /* } */ /* lst_insertafter(Stack, state, after); */ /* } */ /*---------------------------------------------------------------------------*/ PRIVATE void * pop(LIST *list) { void *data; data = lst_deletenext(list, LST_HEAD(list)); return data; } /*---------------------------------------------------------------------------*/ /*auxiliary routines---------------------------------------------------------*/ /*---------------------------------------------------------------------------*/ PRIVATE int best_attainable_energy(vrna_fold_compound_t *vc, STATE *state) { /* evaluation of best possible energy attainable within remaining intervals */ register int sum; INTERVAL *next; vrna_md_t *md; vrna_mx_mfe_t *matrices; int *indx; md = &(vc->params->model_details); matrices = vc->matrices; indx = vc->jindx; sum = state->partial_energy; /* energy of already found elements */ for (next = lst_first(state->Intervals); next; next = lst_next(next)) { if (next->array_flag == 0) sum += (md->circ) ? 
matrices->Fc : matrices->f5[next->j]; else if (next->array_flag == 1) sum += matrices->fML[indx[next->j] + next->i]; else if (next->array_flag == 2) sum += matrices->c[indx[next->j] + next->i]; else if (next->array_flag == 3) sum += matrices->fM1[indx[next->j] + next->i]; else if (next->array_flag == 4) sum += matrices->fc[next->i]; else if (next->array_flag == 5) sum += matrices->fc[next->j]; else if (next->array_flag == 6) sum += matrices->ggg[indx[next->j] + next->i]; } return sum; } /*---------------------------------------------------------------------------*/ PRIVATE void push_back(LIST *Stack, STATE *state) { push(Stack, copy_state(state)); return; } /*---------------------------------------------------------------------------*/ PRIVATE char * get_structure(STATE *state) { char *structure; structure = strdup(state->structure); return structure; } /*---------------------------------------------------------------------------*/ PRIVATE int compare(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return strcmp(((SOLUTION *)solution1)->structure, ((SOLUTION *)solution2)->structure); } PRIVATE int compare_en(const void *solution1, const void *solution2) { if (((SOLUTION *)solution1)->energy > ((SOLUTION *)solution2)->energy) return 1; if (((SOLUTION *)solution1)->energy < ((SOLUTION *)solution2)->energy) return -1; return 0; } /*---------------------------------------------------------------------------*/ PRIVATE void make_output(SOLUTION *SL, int cp, FILE *fp) /* prints stuff */ { SOLUTION *sol; for (sol = SL; sol->structure != NULL; sol++) { char *e_string = vrna_strdup_printf(" %6.2f", sol->energy); char *ss = vrna_db_unpack(sol->structure); char *s = vrna_cut_point_insert(ss, cp); print_structure(fp, s, e_string); free(s); free(ss); free(e_string); } } PRIVATE STATE * derive_new_state(int i, int j, STATE *s, int e, int flag) { STATE *s_new = copy_state(s); INTERVAL *ival = make_interval(i, j, flag); push(s_new->Intervals, ival); s_new->partial_energy += e; return s_new; } PRIVATE void fork_state(int i, int j, STATE *s, int e, int flag, subopt_env *env) { STATE *s_new = derive_new_state(i, j, s, e, flag); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_int_state(int i, int j, int p, int q, STATE *s, int e, subopt_env *env) { STATE *s_new = derive_new_state(p, q, s, e, 2); make_pair(i, j, s_new); make_pair(p, q, s_new); push(env->Stack, s_new); env->nopush = false; } PRIVATE void fork_state_pair(int i, int j, STATE *s, int e, subopt_env *env) { STATE *new_state; new_state = copy_state(s); make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states_pair(int i, int j, int k, STATE *s, int e, int flag1, int flag2, subopt_env *env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i + 1, k - 1, flag1); interval2 = make_interval(k, j - 1, flag2); if (k - i < j - k) { /* push larger interval first */ push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } make_pair(i, j, new_state); new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } PRIVATE void fork_two_states(int i, int j, int p, int q, STATE *s, int e, int flag1, int flag2, subopt_env 
*env) { INTERVAL *interval1, *interval2; STATE *new_state; new_state = copy_state(s); interval1 = make_interval(i, j, flag1); interval2 = make_interval(p, q, flag2); if ((j - i) < (q - p)) { push(new_state->Intervals, interval1); push(new_state->Intervals, interval2); } else { push(new_state->Intervals, interval2); push(new_state->Intervals, interval1); } new_state->partial_energy += e; push(env->Stack, new_state); env->nopush = false; } /*---------------------------------------------------------------------------*/ /* start of subopt backtracking ---------------------------------------------*/ /*---------------------------------------------------------------------------*/ PUBLIC SOLUTION * vrna_subopt(vrna_fold_compound_t *vc, int delta, int sorted, FILE *fp) { struct old_subopt_dat data; vrna_subopt_callback *cb; data.SolutionList = NULL; data.max_sol = 128; data.n_sol = 0; data.fp = fp; data.cp = vc->cutpoint; if (vc) { /* SolutionList stores the suboptimal structures found */ data.SolutionList = (SOLUTION *)vrna_alloc(data.max_sol * sizeof(SOLUTION)); /* end initialize ------------------------------------------------------- */ if (fp) { float min_en; char *SeQ, *energies = NULL; if (vc->strands > 1) min_en = vrna_mfe_dimer(vc, NULL); else min_en = vrna_mfe(vc, NULL); SeQ = vrna_cut_point_insert(vc->sequence, vc->cutpoint); energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.); print_structure(fp, SeQ, energies); free(SeQ); free(energies); vrna_mx_mfe_free(vc); } cb = old_subopt_store; if (fp) cb = (sorted) ? old_subopt_store_compressed : old_subopt_print; /* call subopt() */ vrna_subopt_cb(vc, delta, cb, (void *)&data); if (sorted) { /* sort structures by energy */ if (data.n_sol > 0) { int (*compare_fun)(const void *a, const void *b); switch (sorted) { case VRNA_SORT_BY_ENERGY_ASC: compare_fun = compare_en; break; default: /* a.k.a. 
VRNA_SORT_BY_ENERGY_LEXICOGRAPHIC_ASC */ compare_fun = compare; break; } qsort(data.SolutionList, data.n_sol - 1, sizeof(SOLUTION), compare_fun); } if (fp) make_output(data.SolutionList, vc->cutpoint, fp); } if (fp) { /* we've printed everything -- free solutions */ SOLUTION *sol; for (sol = data.SolutionList; sol->structure != NULL; sol++) free(sol->structure); free(data.SolutionList); data.SolutionList = NULL; } } return data.SolutionList; } PUBLIC void vrna_subopt_cb(vrna_fold_compound_t *vc, int delta, vrna_subopt_callback *cb, void *data) { subopt_env *env; STATE *state; INTERVAL *interval; unsigned int *so, *ss, *se; int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular, threshold; double structure_energy, min_en, eprint; char *struc, *structure; float correction; vrna_param_t *P; vrna_md_t *md; int minimal_energy; int Fc; int *f5; vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID); length = vc->length; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; P = vc->params; md = &(P->model_details); /* do mfe folding to get fill arrays and get ground state energy */ /* in case dangles is neither 0 or 2, set dangles=2 while folding */ circular = md->circ; logML = md->logML; old_dangles = dangle_model = md->dangles; if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */ md->uniq_ML = 1; /* temporarily set dangles to 2 if necessary */ if ((md->dangles != 0) && (md->dangles != 2)) md->dangles = 2; struc = (char *)vrna_alloc(sizeof(char) * (length + 1)); if (circular) { min_en = vrna_mfe(vc, struc); Fc = vc->matrices->Fc; f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } else { min_en = vrna_mfe_dimer(vc, struc); f5 = vc->matrices->f5; /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(vc, struc); } free(struc); eprint = print_energy + min_en; correction = (min_en < 0) ? -0.1 : 0.1; /* Initialize ------------------------------------------------------------ */ maxlevel = 0; count = 0; partial_energy = 0; /* Initialize the stack ------------------------------------------------- */ minimal_energy = (circular) ? Fc : f5[length]; threshold = minimal_energy + delta; if (threshold >= INF) { vrna_message_warning("Energy range too high, limiting to reasonable value"); threshold = INF - EMAX; } /* init env data structure */ env = (subopt_env *)vrna_alloc(sizeof(subopt_env)); env->Stack = NULL; env->nopush = true; env->Stack = make_list(); /* anchor */ env->Intervals = make_list(); /* initial state: */ interval = make_interval(1, length, 0); /* interval [1,length,0] */ push(env->Intervals, interval); env->nopush = false; state = make_state(env->Intervals, NULL, partial_energy, 0, length); /* state->best_energy = minimal_energy; */ push(env->Stack, state); env->nopush = false; /* end initialize ------------------------------------------------------- */ while (1) { /* forever, til nothing remains on stack */ maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel); if (LST_EMPTY(env->Stack)) { /* we are done! 
clean up and quit */ /* fprintf(stderr, "maxlevel: %d\n", maxlevel); */ lst_kill(env->Stack, free_state_node); cb(NULL, 0, data); /* NULL (last time to call callback function */ break; } /* pop the last element ---------------------------------------------- */ state = pop(env->Stack); /* current state to work with */ if (LST_EMPTY(state->Intervals)) { int e; /* state has no intervals left: we got a solution */ count++; structure = get_structure(state); structure_energy = state->partial_energy / 100.; #ifdef CHECK_ENERGY structure_energy = vrna_eval_structure(vc, structure); if (!logML) if ((double)(state->partial_energy / 100.) != structure_energy) { vrna_message_error("%s %6.2f %6.2f", structure, state->partial_energy / 100., structure_energy); exit(1); } #endif if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */ structure_energy = vrna_eval_structure(vc, structure); e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */ if (e > MAXDOS) e = MAXDOS; density_of_states[e]++; if (structure_energy <= eprint) { char *outstruct = vrna_cut_point_insert(structure, (vc->strands > 1) ? ss[so[1]] : -1); cb((const char *)outstruct, structure_energy, data); free(outstruct); } free(structure); } else { /* get (and remove) next interval of state to analyze */ interval = pop(state->Intervals); scan_interval(vc, interval->i, interval->j, interval->array_flag, threshold, state, env); free_interval_node(interval); /* free the current interval */ } free_state_node(state); /* free the current state */ } /* end of while (1) */ /* cleanup memory */ free(env); } PRIVATE void scan_interval(vrna_fold_compound_t *vc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env) { /* real backtrack routine */ /* array_flag = 0: trace back in f5-array */ /* array_flag = 1: trace back in fML-array */ /* array_flag = 2: trace back in repeat() */ /* array_flag = 3: trace back in fM1-array */ STATE *new_state, *temp_state; INTERVAL *new_interval; vrna_param_t *P; vrna_md_t *md; register int k, fi, cij, ij; register int type; register int dangle_model; register int noLP; unsigned int *sn, *so, *ss, *se; int element_energy, best_energy; int *fc, *f5, *c, *fML, *fM1, *ggg; int FcH, FcI, FcM, *fM2; int length, *indx, *rtype, circular, with_gquad, turn; char *ptype; short *S1; unsigned char *hard_constraints, hc_decompose; vrna_hc_t *hc; vrna_sc_t *sc; length = vc->length; sn = vc->strand_number; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; indx = vc->jindx; ptype = vc->ptype; S1 = vc->sequence_encoding; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); dangle_model = md->dangles; noLP = md->noLP; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; fc = vc->matrices->fc; f5 = vc->matrices->f5; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; FcH = vc->matrices->FcH; FcI = vc->matrices->FcI; FcM = vc->matrices->FcM; fM2 = vc->matrices->fM2; hc = vc->hc; hard_constraints = hc->mx; sc = vc->sc; best_energy = best_attainable_energy(vc, state); /* .. 
on remaining intervals */ env->nopush = true; if ((i > 1) && (!array_flag)) vrna_message_error("Error while backtracking!"); if ((j < i + turn + 1) && ((sn[i] == so[1]) || (sn[j] == so[0]))) { /* minimal structure element */ if (array_flag == 0) /* do not forget to add f5[j], since it may contain pseudo energies from soft constraining */ state->partial_energy += f5[j]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } ij = indx[j] + i; /* 13131313131313131313131313131313131313131313131313131313131313131313131 */ if (array_flag == 3 || array_flag == 1) { /* array_flag = 3: interval i,j was generated during */ /* a multiloop decomposition using array fM1 in repeat() */ /* or in this block */ /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ if ((hc->up_ml[j]) && (((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) || (fML[indx[j - 1] + i] != INF))) { if (array_flag == 3) fi = fM1[indx[j - 1] + i] + P->MLbase; else fi = fML[indx[j - 1] + i] + P->MLbase; if (sc) { if (sc->energy_up) fi += sc->energy_up[j][1]; if (sc->f) fi += sc->f(i, j, i, j - 1, VRNA_DECOMP_ML_ML, sc->data); } if ((fi + best_energy <= threshold) && (sn[j - 1] == sn[j])) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, P->MLbase, array_flag, env); } hc_decompose = hard_constraints[length * i + j]; if (hc_decompose & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) { /* i,j may pair */ cij = c[ij]; if (cij != INF) { type = vrna_get_ptype(ij, ptype); switch (dangle_model) { case 0: element_energy = E_MLstem(type, -1, -1, P); break; default: element_energy = E_MLstem(type, (((i > 1) && (sn[i - 1] == sn[i])) || circular) ? S1[i - 1] : -1, (((j < length) && (sn[j] == sn[j + 1])) || circular) ? S1[j + 1] : -1, P); break; } if (sc) { if (sc->f) element_energy += sc->f(i, j, i, j, VRNA_DECOMP_ML_STEM, sc->data); } cij += element_energy; if (cij + best_energy <= threshold) repeat(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } else if ((with_gquad) && (ggg[ij] != INF)) { element_energy = E_MLstem(0, -1, -1, P); cij = ggg[ij] + element_energy; if (cij + best_energy <= threshold) repeat_gquad(vc, i, j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 3 || array_flag == 1 */ /* 11111111111111111111111111111111111111111111111111111111111111111111111 */ if (array_flag == 1) { /* array_flag = 1: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* or in this block */ int stopp, k1j; if ((sn[i - 1] == sn[i]) && (sn[j] == sn[j + 1])) { /*backtrack in FML only if multiloop is possible*/ for (k = i + turn + 1; k <= j - 1 - turn; k++) { /* Multiloop decomposition if i,j contains more than 1 stack */ if ((with_gquad) && (sn[k] == sn[k + 1]) && (fML[indx[k] + i] != INF) && (ggg[indx[j] + k + 1] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat_gquad(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } k1j = indx[j] + k + 1; if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (fML[indx[k] + i] != INF) && (c[k1j] != INF)) { short s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[i - 1] == sn[i]) ? 
S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, sc->data); } if (sn[k] == sn[k + 1]) { if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, array_flag); env->nopush = false; repeat(vc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env); free_state_node(temp_state); } } } } } if (vc->strands > 1) { stopp = se[so[0]] - 1; /*if cp -1: k on cut, => no ml*/ stopp = MIN2(stopp, j - 1 - turn); if (i > ss[so[1]]) stopp = j - 1 - turn; else if (i == ss[so[1]]) stopp = 0; /*not a multi loop*/ } else { stopp = j - 1 - turn; } int up = 1; for (k = i; k <= stopp; k++, up++) { if (hc->up_ml[i] >= up) { k1j = indx[j] + k + 1; /* Multiloop decomposition if i,j contains only 1 stack */ if ((with_gquad) && (ggg[k1j] != INF)) { element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up; if (sc) if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (ggg[k1j] + element_energy + best_energy <= threshold) repeat_gquad(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length * j + k + 1] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP_ENC) && (c[k1j] != INF)) { int s5, s3; type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); element_energy += P->MLbase * up; if (sc) { if (sc->energy_up) element_energy += sc->energy_up[i][up]; if (sc->f) element_energy += sc->f(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, sc->data); } if (c[k1j] + element_energy + best_energy <= threshold) repeat(vc, k + 1, j, state, element_energy, 0, best_energy, threshold, env); } } } } /* array_flag == 1 */ /* 22222222222222222222222222222222222222222222222222 */ /* */ /* array_flag = 2: interval i,j was generated from a */ /* stack, bulge, or internal loop in repeat() */ /* */ /* 22222222222222222222222222222222222222222222222222 */ if (array_flag == 2) { repeat(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) if (!noLP) vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j); return; } /* 00000000000000000000000000000000000000000000000000 */ /* */ /* array_flag = 0: interval i,j was found while */ /* tracing back through f5-array and c-array */ /* or within this block */ /* */ /* 00000000000000000000000000000000000000000000000000 */ if ((array_flag == 0) && !circular) { int s5, s3, kj, tmp_en; if ((hc->up_ext[j]) && (f5[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (f5[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 0, env); } for (k = j - turn - 1; k > 1; k--) { kj = indx[j] + k; if ((with_gquad) && (sn[k] == sn[j]) && (f5[k - 1] != INF) && (ggg[kj] != INF)) { element_energy = 0; if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; /* backtrace the quadruplex */ repeat_gquad(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (f5[k - 1] != INF) 
&& (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); /* k and j pair */ switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k - 1] : -1; s3 = ((j < length) && (sn[j] == sn[j + 1])) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sn[k] != sn[j]) /*&&(state->is_duplex==0))*/ element_energy += P->DuplexInit; /*state->is_duplex=1;*/ if (sc) { if (sc->f) element_energy += sc->f(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + 1; if ((with_gquad) && (sn[k] == sn[j]) && (ggg[kj] != INF)) { element_energy = 0; if (ggg[kj] + element_energy + best_energy <= threshold) /* backtrace the quadruplex */ repeat_gquad(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } if ((hard_constraints[length + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) && (sn[j] == sn[j + 1]) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sn[1] != sn[j]) element_energy += P->DuplexInit; if (sc) { if (sc->f) element_energy += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, 1, j, state, element_energy, 0, best_energy, threshold, env); } } /* end array_flag == 0 && !circular*/ /* or do we subopt circular? */ else if (array_flag == 0) { int k, l, p, q, tmp_en; /* if we've done everything right, we will never reach this case more than once */ /* right after the initilization of the stack with ([1,n], empty, 0) */ /* lets check, if we can have an open chain without breaking the threshold */ /* this is an ugly work-arround cause in case of an open chain we do not have to */ /* backtrack anything further... */ if (hc->up_ext[1] >= length) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[1][length]; if (sc->f) tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data); } if (tmp_en <= threshold) { new_state = derive_new_state(1, 2, state, 0, 0); new_state->partial_energy = 0; push(env->Stack, new_state); env->nopush = false; } } /* ok, lets check if we can do an exterior hairpin without breaking the threshold */ /* best energy should be 0 if we are here */ if (FcH + best_energy <= threshold) { /* lets search for all exterior hairpin cases, that fit into our threshold barrier */ /* we use index k,l to avoid confusion with i,j index of our state... */ /* if we reach here, i should be 1 and j should be n respectively */ for (k = i; k < j; k++) { if (hc->up_hp[1] < k) break; for (l = j; l >= k + turn + 1; l--) { int kl, tmpE; kl = indx[l] + k; if (c[kl] != INF) { tmpE = vrna_E_hp_loop(vc, l, k); if (c[kl] + tmpE + best_energy <= threshold) { /* what we really have to do is something like this, isn't it? */ /* we have to create a new state, with interval [k,l], then we */ /* add our loop energy as initial energy of this state and put */ /* the state onto the stack R... for further refinement... 
*/ /* we also denote this new interval to be scanned in C */ fork_state(k, l, state, tmpE, 2, env); } } } } } /* now lets see, if we can do an exterior interior loop without breaking the threshold */ if (FcI + best_energy <= threshold) { /* now we search for our exterior interior loop possibilities */ for (k = i; k < j; k++) { for (l = j; l >= k + turn + 1; l--) { int kl, type, tmpE; kl = indx[l] + k; /* just confusing these indices ;-) */ if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[kl] != INF)) { type = rtype[vrna_get_ptype(kl, ptype)]; for (p = l + 1; p < j; p++) { int u1, qmin; u1 = p - l - 1; if (u1 + k - 1 > MAXLOOP) break; if (hc->up_int[l + 1] < u1) break; qmin = u1 + k - 1 + j - MAXLOOP; if (qmin < p + turn + 1) qmin = p + turn + 1; for (q = j; q >= qmin; q--) { int u2, type_2; if (hc->up_int[q + 1] < (j - q + k - 1)) break; if ((hard_constraints[length * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[indx[q] + p] != INF)) { type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)]; u2 = k - 1 + j - q; if (u1 + u2 > MAXLOOP) continue; tmpE = E_IntLoop(u1, u2, type, type_2, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], P); if (sc) { if (sc->energy_up) tmpE += sc->energy_up[l + 1][p - l - 1] + sc->energy_up[q + 1][j - q] + sc->energy_up[1][k - 1]; if (sc->energy_stack) { if (u1 + u2 == 0) { tmpE += sc->energy_stack[k] + sc->energy_stack[l] + sc->energy_stack[p] + sc->energy_stack[q]; } } } if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) { /* ok, similar to the hairpin stuff, we add new states onto the stack R */ /* but in contrast to the hairpin decomposition, we have to add two new */ /* intervals, enclosed by k,l and p,q respectively and we also have to */ /* add the partial energy, that comes from the exterior interior loop */ fork_two_states(k, l, p, q, state, tmpE, 2, 2, env); } } } } } } } } /* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */ if (FcM <= threshold) { /* this decomposition will be somehow more complicated...so lets see what we do here... */ /* first we want to find out which split inidices we can use without exceeding the threshold */ int tmpE2; for (k = turn + 1; k < j - 2 * turn; k++) { if ((fML[indx[k] + 1] != INF) && (fM2[k + 1] != INF)) { tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing; if (tmpE2 + best_energy <= threshold) { /* grmpfh, we have found a possible split index k so we have to split fM2 and fML now */ /* lets do it first in fM2 anyway */ for (l = k + turn + 2; l < j - turn - 1; l++) { tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1]; if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) { /* we've (hopefully) found a valid decomposition of fM2 and therefor we have all */ /* three intervals for our new state to be pushed on stack R */ new_state = copy_state(state); /* first interval leads for search in fML array */ new_interval = make_interval(1, k, 1); push(new_state->Intervals, new_interval); env->nopush = false; /* next, we have the first interval that has to be traced in fM1 */ new_interval = make_interval(k + 1, l, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* and the last of our three intervals is also one to be traced within fM1 array... */ new_interval = make_interval(l + 1, j, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* mmh, we add the energy for closing the multiloop now... 
*/
              new_state->partial_energy += P->MLclosing;
              /* next we push our state onto the R stack */
              push(env->Stack, new_state);
              env->nopush = false;
            }
            /* else we search further... */
          }
          /* ok, we have to decompose fML now... */
        }
      }
    }
  }
}
/* that's all, folks, for the circular case... */

  /* 44444444444444444444444444444444444444444444444444 */
  /*                                                    */
  /* array_flag = 4:  interval i,j was found while      */
  /* tracing back through fc-array smaller than cp      */
  /* or within this block                               */
  /*                                                    */
  /* 44444444444444444444444444444444444444444444444444 */
  if (array_flag == 4) {
    int ik, s5, s3, tmp_en;

    if ((hc->up_ext[i]) && (fc[i + 1] != INF)) {
      tmp_en = 0;

      if (sc) {
        if (sc->energy_up)
          tmp_en += sc->energy_up[i][1];

        if (sc->f)
          tmp_en += sc->f(i, j, i + 1, j, VRNA_DECOMP_EXT_EXT, sc->data);
      }

      if (fc[i + 1] + tmp_en + best_energy <= threshold)
        /* no basepair, nibbling of 5'-end */
        fork_state(i + 1, j, state, tmp_en, 4, env);
    }

    for (k = i + turn + 1; k < j; k++) {
      ik = indx[k] + i;
      if ((with_gquad) && (fc[k + 1] != INF) && (ggg[ik] != INF)) {
        if (fc[k + 1] + ggg[ik] + best_energy <= threshold) {
          temp_state = derive_new_state(k + 1, j, state, 0, 4);
          env->nopush = false;
          repeat_gquad(vc, i, k, temp_state, 0, fc[k + 1], best_energy, threshold, env);
          free_state_node(temp_state);
        }
      }

      if ((hard_constraints[length * i + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
          (fc[k + 1] != INF) && (c[ik] != INF)) {
        type = vrna_get_ptype(ik, ptype);

        switch (dangle_model) {
          case 0:
            s5 = s3 = -1;
            break;
          default:
            s5 = (i > 1) ? S1[i - 1] : -1;
            s3 = S1[k + 1];
            break;
        }

        element_energy = vrna_E_ext_stem(type, s5, s3, P);

        if (sc) {
          if (sc->f)
            element_energy += sc->f(i, j, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, sc->data);
        }

        if (fc[k + 1] + c[ik] + element_energy + best_energy <= threshold) {
          temp_state = derive_new_state(k + 1, j, state, 0, 4);
          env->nopush = false;
          repeat(vc, i, k, temp_state, element_energy, fc[k + 1], best_energy, threshold, env);
          free_state_node(temp_state);
        }
      }
    }

    ik = indx[se[so[0]]] + i; /* indx[j] + i; */
    if ((with_gquad) && (ggg[ik] != INF))
      if (ggg[ik] + best_energy <= threshold)
        repeat_gquad(vc, i, se[so[0]], state, 0, 0, best_energy, threshold, env);

    if ((hard_constraints[length * i + se[so[0]]] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) &&
        (c[ik] != INF)) {
      type = vrna_get_ptype(ik, ptype);
      s3 = -1;
      switch (dangle_model) {
        case 0:
          s5 = -1;
          break;
        default:
          s5 = (i > 1) ?
S1[i - 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, se[so[0]], i, se[so[0]], VRNA_DECOMP_EXT_STEM, sc->data); } if (c[ik] + element_energy + best_energy <= threshold) repeat(vc, i, se[so[0]], state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 4 */ /* 55555555555555555555555555555555555555555555555555 */ /* */ /* array_flag = 5: interval cp=i,j was found while */ /* tracing back through fc-array greater than cp */ /* or within this block */ /* */ /* 55555555555555555555555555555555555555555555555555 */ if (array_flag == 5) { int kj, s5, s3, tmp_en; if ((hc->up_ext[j]) && (fc[j - 1] != INF)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[j][1]; if (sc->f) tmp_en += sc->f(i, j, i, j - 1, VRNA_DECOMP_EXT_EXT, sc->data); } if (fc[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 5, env); } for (k = j - turn - 1; k > i; k--) { kj = indx[j] + k; if ((with_gquad) && (fc[k - 1] != INF) && (ggg[kj] != INF)) { if (fc[k - 1] + ggg[kj] + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat_gquad(vc, k, j, temp_state, 0, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } if ((hard_constraints[length * j + k] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); element_energy = 0; switch (dangle_model) { case 0: s3 = s5 = -1; break; default: s5 = S1[k - 1]; s3 = (j < length) ? S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(i, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, sc->data); } if (fc[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k - 1, state, 0, 5); env->nopush = false; repeat(vc, k, j, temp_state, element_energy, fc[k - 1], best_energy, threshold, env); free_state_node(temp_state); } } } kj = indx[j] + ss[so[1]]; /* indx[j] + i; */ if ((with_gquad) && (ggg[kj] != INF)) if (ggg[kj] + best_energy <= threshold) repeat_gquad(vc, ss[so[1]], j, state, 0, 0, best_energy, threshold, env); if ((hard_constraints[length * ss[so[1]] + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) ? 
S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc) { if (sc->f) element_energy += sc->f(ss[so[1]], j, ss[so[1]], j, VRNA_DECOMP_EXT_STEM, sc->data); } if (c[kj] + element_energy + best_energy <= threshold) repeat(vc, ss[so[1]], j, state, element_energy, 0, best_energy, threshold, env); } } /* array_flag == 5 */ if (array_flag == 6) { /* we have a gquad */ repeat_gquad(vc, i, j, state, 0, 0, best_energy, threshold, env); if (env->nopush) vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j); return; } if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } /*---------------------------------------------------------------------------*/ PRIVATE void repeat_gquad(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { unsigned int *sn; int *ggg, *indx, element_energy; short *S1; vrna_param_t *P; indx = vc->jindx; sn = vc->strand_number; ggg = vc->matrices->ggg; S1 = vc->sequence_encoding; P = vc->params; /* find all gquads that fit into the energy range and the interval [i,j] */ STATE *new_state; best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (sn[i] == sn[j]) { element_energy = ggg[indx[j] + i]; if ((element_energy != INF) && (element_energy + best_energy <= threshold)) { int cnt; int *L; int *l; /* find out how many gquads we might expect in the interval [i,j] */ int num_gquads = get_gquad_count(S1, i, j); num_gquads++; L = (int *)vrna_alloc(sizeof(int) * num_gquads); l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3); L[0] = -1; get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy); for (cnt = 0; L[cnt] != -1; cnt++) { new_state = copy_state(state); make_gquad(i, L[cnt], &(l[3 * cnt]), new_state); new_state->partial_energy += part_energy; new_state->partial_energy += element_energy; /* new_state->best_energy = * hairpin[unpaired] + element_energy + best_energy; */ push(env->Stack, new_state); env->nopush = false; } free(L); free(l); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void repeat(vrna_fold_compound_t *vc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env) { /* routine to find stacks, bulges, internal loops and multiloops */ /* within interval closed by basepair i,j */ STATE *new_state; vrna_param_t *P; vrna_md_t *md; register int ij, k, p, q, energy, new; register int mm; register int no_close, type, type_2; char *ptype; unsigned int n, *sn, *so, *ss, *se; int element_energy; int *fc, *c, *fML, *fM1, *ggg; int rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn; short *S1; vrna_hc_t *hc; vrna_sc_t *sc; n = vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; indx = vc->jindx; sn = vc->strand_number; so = vc->strand_order; ss = vc->strand_start; se = vc->strand_end; P = vc->params; md = &(P->model_details); rtype = &(md->rtype[0]); noGUclosure = md->noGUclosure; noLP = md->noLP; with_gquad = md->gquad; dangle_model = md->dangles; turn = md->min_loop_size; fc = vc->matrices->fc; c = vc->matrices->c; fML = vc->matrices->fML; fM1 = vc->matrices->fM1; ggg = vc->matrices->ggg; hc = vc->hc; sc = vc->sc; ij = indx[j] + i; type = vrna_get_ptype(ij, ptype); /* * if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j); */ no_close = (((type == 3) || (type == 4)) && noGUclosure); if (hc->mx[n * i + 
j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { if (noLP) { /* always consider the structure with additional stack */ if (i + turn + 2 < j) { if (hc->mx[n * (i + 1) + j - 1] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC) { type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)]; energy = 0; if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) { energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P); if (sc) { if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { energy += sc->energy_stack[i] + sc->energy_stack[i + 1] + sc->energy_stack[j - 1] + sc->energy_stack[j]; } if (sc->f) energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_IL, sc->data); } new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2); make_pair(i, j, new_state); make_pair(i + 1, j - 1, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')') /* adding a stack is the only possible structure */ return; } } } } } best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) { int minq = j - i + p - MAXLOOP - 2; if (minq < p + 1 + turn) minq = p + 1 + turn; if (hc->up_int[i + 1] < (p - i - 1)) break; for (q = j - 1; q >= minq; q--) { if (hc->up_int[q + 1] < (j - q - 1)) break; /* skip stack if noLP, since we've already processed it above */ if ((noLP) && (p == i + 1) && (q == j - 1)) continue; if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC)) continue; if (c[indx[q] + p] == INF) continue; type_2 = vrna_get_ptype(indx[q] + p, ptype); if (noGUclosure) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ if ((sn[i] == sn[p]) && (sn[q] == sn[j])) { energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2], S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); new = energy + c[indx[q] + p]; if (sc) { if (sc->energy_up) energy += sc->energy_up[i + 1][p - i - 1] + sc->energy_up[q + 1][j - q - 1]; if (sc->energy_bp) energy += sc->energy_bp[ij]; if (sc->energy_stack) { if ((p == i + 1) && (q == j - 1)) { energy += sc->energy_stack[i] + sc->energy_stack[p] + sc->energy_stack[q] + sc->energy_stack[j]; } } if (sc->f) energy += sc->f(i, j, p, q, VRNA_DECOMP_PAIR_IL, sc->data); } new = energy + c[indx[q] + p]; if (new + best_energy <= threshold) /* stack, bulge, or interior loop */ fork_int_state(i, j, p, q, state, part_energy + energy, env); } /*end of if block */ } /* end of q-loop */ } /* end of p-loop */ } if (sn[i] != sn[j]) { /*look in fc*/ if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) && (fc[i + 1] != INF) && (fc[j - 1] != INF)) { rt = rtype[type]; element_energy = 0; switch (dangle_model) { case 0: element_energy = vrna_E_ext_stem(rt, -1, -1, P); break; default: element_energy = vrna_E_ext_stem(rt, (sn[j - 1] == sn[j]) ? S1[j - 1] : -1, (sn[i] == sn[i + 1]) ? 
S1[i + 1] : -1, P); break; } if (fc[i + 1] + fc[j - 1] + element_energy + best_energy <= threshold) fork_two_states_pair(i, j, ss[so[1]], state, part_energy + element_energy, 4, 5, env); } } mm = P->MLclosing; rt = rtype[type]; if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_MB_LOOP) && ((vc->strands < 2) || ((i != se[so[0]]) && (j != ss[so[1]])))) { element_energy = mm; switch (dangle_model) { case 0: element_energy = E_MLstem(rt, -1, -1, P) + mm; break; default: element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm; break; } if (sc) { if (sc->energy_bp) element_energy += sc->energy_bp[ij]; if (sc->f) element_energy += sc->f(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, sc->data); } /* multiloop decomposition */ if ((sc) && (sc->f)) { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { eee += fM1[indx[j - 1] + k] + best_energy; int aux_eee = element_energy + sc->f(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, sc->data); if ((eee + aux_eee) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env); } } } else { for (k = i + turn + 2; k <= j - turn - 2; k++) { int eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { /* multiloop decomposition */ if ((eee + fM1[indx[j - 1] + k] + element_energy + best_energy) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + element_energy, 1, 3, env); } } } } if (sn[i] == sn[j]) { if ((hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_HP_LOOP) && (!no_close)) { element_energy = vrna_E_hp_loop(vc, i, j); if (element_energy != INF) { if (element_energy + best_energy <= threshold) /* hairpin structure */ fork_state_pair(i, j, state, part_energy + element_energy, env); } } if (with_gquad) { /* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */ int cnt, *p, *q, *en, tmp_en; p = q = en = NULL; en = E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P); for (cnt = 0; p[cnt] != -1; cnt++) { if ((hc->up_int[i + 1] >= p[cnt] - i - 1) && (hc->up_int[q[cnt] + 1] >= j - q[cnt] - 1)) { tmp_en = en[cnt]; if (sc) { if (sc->energy_bp) tmp_en += sc->energy_bp[ij]; if (sc->energy_up) tmp_en += sc->energy_up[i + 1][p[cnt] - i - 1] + sc->energy_up[q[cnt] + 1][j - q[cnt] - 1]; } new_state = derive_new_state(p[cnt], q[cnt], state, tmp_en + part_energy, 6); make_pair(i, j, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; } } free(en); free(p); free(q); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void old_subopt_print(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; if (structure && d->fp) { char *e_string = vrna_strdup_printf(" %6.2f", energy); print_structure(d->fp, structure, e_string); free(e_string); } } PRIVATE void old_subopt_store(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; d->SolutionList[d->n_sol++].structure = strdup(structure); } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void 
*data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (SOLUTION *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(SOLUTION)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; if (d->cp > 0) { int cp = d->cp; char *s = vrna_cut_point_remove(structure, &cp); d->SolutionList[d->n_sol++].structure = vrna_db_pack(s); free(s); } else { d->SolutionList[d->n_sol++].structure = vrna_db_pack(structure); } } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } /*###########################################*/ /*# deprecated functions below #*/ /*###########################################*/ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PUBLIC SOLUTION * subopt(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp); } PUBLIC SOLUTION * subopt_circ(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp); } PUBLIC SOLUTION * subopt_par(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { return wrap_subopt(seq, structure, parameters, delta, is_constrained, is_circular, fp); } PRIVATE SOLUTION * wrap_subopt(char *string, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { vrna_fold_compound_t *vc; vrna_param_t *P; char *seq; #ifdef _OPENMP /* Explicitly turn off dynamic threads */ omp_set_dynamic(0); #endif /* we need the parameter structure for hard constraints */ if (parameters) { P = vrna_params_copy(parameters); } else { vrna_md_t md; set_model_details(&md); md.temperature = temperature; P = vrna_params(&md); } P->model_details.circ = is_circular; P->model_details.uniq_ML = uniq_ML = 1; /* what about cofold sequences here? Is it safe to call the below cut_point_insert() ? */ /* dirty hack to reinsert the '&' according to the global variable 'cut_point' */ seq = vrna_cut_point_insert(string, cut_point); vc = vrna_fold_compound(seq, &(P->model_details), ((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT)); if (parameters) { /* replace params if necessary */ free(vc->params); vc->params = P; } else { free(P); } /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */ if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK | VRNA_CONSTRAINT_DB_INTRAMOL | VRNA_CONSTRAINT_DB_INTERMOL; vrna_constraints_add(vc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = vc; backward_compat = 1; /* cleanup */ free(seq); return vrna_subopt(vc, delta, subopt_sorted, fp); } #endif /*---------------------------------------------------------------------------*/ /* Well, that is the end!----------------------------------------------------*/ /*---------------------------------------------------------------------------*/
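/*
 * Usage sketch (illustration only, hence the #if 0 guard -- not part of the
 * library): enumerating all suboptimal structures within 5 kcal/mol of the
 * MFE through the vrna_subopt() interface wrapped above.  Assumptions made
 * here: vrna_subopt() takes delta in units of 10 cal/mol, the returned list
 * is terminated by an entry whose structure pointer is NULL (as produced by
 * old_subopt_store() above), and uniq_ML = 1 mirrors what wrap_subopt()
 * sets before folding.  The example sequence is arbitrary.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#include <ViennaRNA/model.h>
#include <ViennaRNA/fold_compound.h>
#include <ViennaRNA/subopt.h>

int
main(void)
{
  vrna_md_t md;

  vrna_md_set_default(&md);
  md.uniq_ML = 1;   /* subopt requires the unique multiloop decomposition */

  vrna_fold_compound_t *fc =
    vrna_fold_compound("CGCAGGGAUACCCGCG", &md, VRNA_OPTION_DEFAULT);

  /* delta = 500 corresponds to 5.0 kcal/mol above the MFE, sorted output */
  vrna_subopt_solution_t *sol = vrna_subopt(fc, 500, 1, NULL);

  for (int s = 0; sol && sol[s].structure; s++) {
    printf("%s %6.2f\n", sol[s].structure, sol[s].energy);
    free(sol[s].structure);
  }
  free(sol);
  vrna_fold_compound_free(fc);
  return 0;
}
#endif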
GB_emult_08_phase0.c
//------------------------------------------------------------------------------ // GB_emult_08_phase0: find vectors of C to compute for C=A.*B or C<M>=A.*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // The eWise multiply of two matrices, C=A.*B, C<M>=A.*B, or C<!M>=A.*B starts // with this phase, which determines which vectors of C need to be computed. // On input, A and B are the two matrices being ewise multiplied, and M is the // optional mask matrix. If present, it is not complemented. // The M, A, and B matrices are sparse or hypersparse. C will be sparse // (if Ch is returned NULL) or hypersparse (if Ch is returned non-NULL). // Ch: the vectors to compute in C. Not allocated, but equal to either // A->h, B->h, or M->h, or NULL if C is not hypersparse. // C_to_A: if A is hypersparse, and Ch is not A->h, then C_to_A [k] = kA // if the kth vector j = Ch [k] is equal to Ah [kA]. If j does not appear // in A, then C_to_A [k] = -1. Otherwise, C_to_A is returned as NULL. // C is always hypersparse in this case. // C_to_B: if B is hypersparse, and Ch is not B->h, then C_to_B [k] = kB // if the kth vector j = Ch [k] is equal to Bh [kB]. If j does not appear // in B, then C_to_B [k] = -1. Otherwise, C_to_B is returned as NULL. // C is always hypersparse in this case. // C_to_M: if M is hypersparse, and Ch is not M->h, then C_to_M [k] = kM // if the kth vector j = GBH (Ch, k) is equal to Mh [kM]. // If j does not appear in M, then C_to_M [k] = -1. Otherwise, C_to_M is // returned as NULL. C is always hypersparse in this case. 
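// Illustration (a sketch, not library code, hence the #if 0 guard): the
// C_to_A, C_to_B, and C_to_M maps described above are conceptually the
// result of looking up each vector j = Ch [k] in the sorted hyperlist of
// the other matrix.  A minimal standalone version of that lookup is below;
// the real code uses GB_lookup, which also returns the vector's pointer
// range.

#if 0
#include <stdint.h>

static int64_t hyperlist_lookup     // returns kX with Xh [kX] == j, or -1
(
    const int64_t *Xh,              // sorted hyperlist of matrix X
    int64_t Xnvec,                  // # of vectors present in X
    int64_t j                       // vector index to find
)
{
    int64_t lo = 0, hi = Xnvec - 1 ;
    while (lo <= hi)
    {
        int64_t mid = lo + (hi - lo) / 2 ;
        if (Xh [mid] == j) return (mid) ;   // j is the (mid)th vector of X
        else if (Xh [mid] < j) lo = mid + 1 ;
        else hi = mid - 1 ;
    }
    return (-1) ;                   // j does not appear in X
}
#endif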
// FUTURE:: exploit A==M, B==M, and A==B aliases #include "GB_emult.h" GrB_Info GB_emult_08_phase0 // find vectors in C for C=A.*B or C<M>=A.*B ( int64_t *p_Cnvec, // # of vectors to compute in C const int64_t *restrict *Ch_handle, // Ch is M->h, A->h, B->h, or NULL size_t *Ch_size_handle, int64_t *restrict *C_to_M_handle, // C_to_M: size Cnvec, or NULL size_t *C_to_M_size_handle, int64_t *restrict *C_to_A_handle, // C_to_A: size Cnvec, or NULL size_t *C_to_A_size_handle, int64_t *restrict *C_to_B_handle, // C_to_B: size Cnvec, or NULL size_t *C_to_B_size_handle, int *C_sparsity, // sparsity structure of C // original input: const GrB_Matrix M, // optional mask, may be NULL const GrB_Matrix A, const GrB_Matrix B, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- // M, A, and B can be jumbled for this phase ASSERT (p_Cnvec != NULL) ; ASSERT (Ch_handle != NULL) ; ASSERT (Ch_size_handle != NULL) ; ASSERT (C_to_A_handle != NULL) ; ASSERT (C_to_B_handle != NULL) ; ASSERT_MATRIX_OK_OR_NULL (M, "M for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (M)) ; ASSERT (GB_JUMBLED_OK (M)) ; // pattern not accessed ASSERT (!GB_PENDING (M)) ; ASSERT_MATRIX_OK (A, "A for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (B)) ; // pattern not accessed ASSERT (!GB_PENDING (A)) ; ASSERT_MATRIX_OK (B, "B for emult phase0", GB0) ; ASSERT (!GB_ZOMBIES (B)) ; ASSERT (GB_JUMBLED_OK (A)) ; // pattern not accessed ASSERT (!GB_PENDING (B)) ; ASSERT (A->vdim == B->vdim) ; ASSERT (A->vlen == B->vlen) ; ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ; ASSERT (GB_IMPLIES (M != NULL, A->vlen == M->vlen)) ; //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- (*p_Cnvec) = 0 ; (*Ch_handle) = NULL ; (*Ch_size_handle) = 0 ; if (C_to_M_handle != NULL) { (*C_to_M_handle) = NULL ; } (*C_to_A_handle) = NULL ; (*C_to_B_handle) = NULL ; ASSERT ((*C_sparsity) == GxB_SPARSE || (*C_sparsity) == GxB_HYPERSPARSE) ; const int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ; int64_t *restrict C_to_M = NULL ; size_t C_to_M_size = 0 ; int64_t *restrict C_to_A = NULL ; size_t C_to_A_size = 0 ; int64_t *restrict C_to_B = NULL ; size_t C_to_B_size = 0 ; //-------------------------------------------------------------------------- // get content of M, A, and B //-------------------------------------------------------------------------- int64_t n = A->vdim ; int64_t Anvec = A->nvec ; int64_t vlen = A->vlen ; const int64_t *restrict Ah = A->h ; bool A_is_hyper = (Ah != NULL) ; int64_t Bnvec = B->nvec ; const int64_t *restrict Bh = B->h ; bool B_is_hyper = (Bh != NULL) ; int64_t Mnvec = 0 ; const int64_t *restrict Mh = NULL ; bool M_is_hyper = false ; if (M != NULL) { Mnvec = M->nvec ; Mh = M->h ; M_is_hyper = (Mh != NULL) ; } //-------------------------------------------------------------------------- // determine how to construct the vectors of C //-------------------------------------------------------------------------- if (M != NULL) { //---------------------------------------------------------------------- // 8 cases to consider: A, B, M can each be hyper or sparse //---------------------------------------------------------------------- // Mask is present and not complemented if (A_is_hyper) { if (B_is_hyper) { if (M_is_hyper) { 
//---------------------------------------------------------- // (1) A hyper, B hyper, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Bh, Ah int64_t nvec = GB_IMIN (Anvec, Bnvec) ; nvec = GB_IMIN (nvec, Mnvec) ; if (nvec == Anvec) { Ch = Ah ; Ch_size = A->h_size ; } else if (nvec == Bnvec) { Ch = Bh ; Ch_size = B->h_size ; } else // (nvec == Mnvec) { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (2) A hyper, B hyper, M sparse: C hyper //---------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Bh ; Ch_size = B->h_size ; } } } else { if (M_is_hyper) { //---------------------------------------------------------- // (3) A hyper, B sparse, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Ah if (Anvec <= Mnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (4) A hyper, B sparse, M sparse: C hyper //---------------------------------------------------------- Ch = Ah ; Ch_size = A->h_size ; } } } else { if (B_is_hyper) { if (M_is_hyper) { //---------------------------------------------------------- // (5) A sparse, B hyper, M hyper: C hyper //---------------------------------------------------------- // Ch = smaller of Mh, Bh if (Bnvec <= Mnvec) { Ch = Bh ; Ch_size = B->h_size ; } else { Ch = Mh ; Ch_size = M->h_size ; } } else { //---------------------------------------------------------- // (6) A sparse, B hyper, M sparse: C hyper //---------------------------------------------------------- Ch = Bh ; Ch_size = B->h_size ; } } else { if (M_is_hyper) { //---------------------------------------------------------- // (7) A sparse, B sparse, M hyper: C hyper //---------------------------------------------------------- Ch = Mh ; Ch_size = M->h_size ; } else { //---------------------------------------------------------- // (8) A sparse, B sparse, M sparse: C sparse //---------------------------------------------------------- Ch = NULL ; } } } } else { //---------------------------------------------------------------------- // 4 cases to consider: A, B can be hyper or sparse //---------------------------------------------------------------------- // Mask is not present, or present and complemented. 
if (A_is_hyper) { if (B_is_hyper) { //-------------------------------------------------------------- // (1) A hyper, B hyper: C hyper //-------------------------------------------------------------- // Ch = smaller of Ah, Bh if (Anvec <= Bnvec) { Ch = Ah ; Ch_size = A->h_size ; } else { Ch = Bh ; Ch_size = B->h_size ; } } else { //-------------------------------------------------------------- // (2) A hyper, B sparse: C hyper //-------------------------------------------------------------- Ch = Ah ; Ch_size = A->h_size ; } } else { if (B_is_hyper) { //-------------------------------------------------------------- // (3) A sparse, B hyper: C hyper //-------------------------------------------------------------- Ch = Bh ; Ch_size = B->h_size ; } else { //-------------------------------------------------------------- // (4) A sparse, B sparse: C sparse //-------------------------------------------------------------- Ch = NULL ; } } } //-------------------------------------------------------------------------- // find Cnvec //-------------------------------------------------------------------------- int64_t Cnvec ; if (Ch == NULL) { // C is sparse (*C_sparsity) = GxB_SPARSE ; Cnvec = n ; } else { // C is hypersparse; one of A, B, or M are hypersparse ASSERT (A_is_hyper || B_is_hyper || M_is_hyper) ; (*C_sparsity) = GxB_HYPERSPARSE ; if (Ch == Ah) { Cnvec = Anvec ; } else if (Ch == Bh) { Cnvec = Bnvec ; } else // (Ch == Mh) { Cnvec = Mnvec ; } } //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // construct C_to_M mapping //-------------------------------------------------------------------------- if (M_is_hyper && Ch != Mh) { // allocate C_to_M C_to_M = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_M_size) ; if (C_to_M == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } // compute C_to_M ASSERT (Ch != NULL) ; const int64_t *restrict Mp = M->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < Cnvec ; k++) { int64_t pM, pM_end, kM = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Mh, Mp, vlen, &kM, Mnvec-1, j, &pM, &pM_end) ; C_to_M [k] = (pM < pM_end) ? kM : -1 ; } } //-------------------------------------------------------------------------- // construct C_to_A mapping //-------------------------------------------------------------------------- if (A_is_hyper && Ch != Ah) { // allocate C_to_A C_to_A = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_A_size) ; if (C_to_A == NULL) { // out of memory GB_FREE_WERK (&C_to_M, C_to_M_size) ; return (GrB_OUT_OF_MEMORY) ; } // compute C_to_A ASSERT (Ch != NULL) ; const int64_t *restrict Ap = A->p ; int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < Cnvec ; k++) { int64_t pA, pA_end, kA = 0 ; int64_t j = Ch [k] ; GB_lookup (true, Ah, Ap, vlen, &kA, Anvec-1, j, &pA, &pA_end) ; C_to_A [k] = (pA < pA_end) ? 
kA : -1 ;
        }
    }

    //--------------------------------------------------------------------------
    // construct C_to_B mapping
    //--------------------------------------------------------------------------

    if (B_is_hyper && Ch != Bh)
    {
        // allocate C_to_B
        C_to_B = GB_MALLOC_WERK (Cnvec, int64_t, &C_to_B_size) ;
        if (C_to_B == NULL)
        {
            // out of memory
            GB_FREE_WERK (&C_to_M, C_to_M_size) ;
            GB_FREE_WERK (&C_to_A, C_to_A_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }

        // compute C_to_B
        ASSERT (Ch != NULL) ;
        const int64_t *restrict Bp = B->p ;
        int64_t k ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < Cnvec ; k++)
        {
            int64_t pB, pB_end, kB = 0 ;
            int64_t j = Ch [k] ;
            GB_lookup (true, Bh, Bp, vlen, &kB, Bnvec-1, j, &pB, &pB_end) ;
            C_to_B [k] = (pB < pB_end) ? kB : -1 ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    (*p_Cnvec) = Cnvec ;
    (*Ch_handle) = Ch ;
    (*Ch_size_handle) = Ch_size ;
    if (C_to_M_handle != NULL)
    {
        (*C_to_M_handle) = C_to_M ;
        (*C_to_M_size_handle) = C_to_M_size ;
    }
    (*C_to_A_handle) = C_to_A ;
    (*C_to_A_size_handle) = C_to_A_size ;
    (*C_to_B_handle) = C_to_B ;
    (*C_to_B_size_handle) = C_to_B_size ;

    //--------------------------------------------------------------------------
    // The code below describes what the output contains:
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    ASSERT (A != NULL) ;        // A and B are always present
    ASSERT (B != NULL) ;
    int64_t jlast = -1 ;
    for (int64_t k = 0 ; k < Cnvec ; k++)
    {
        // C(:,j) is in the list, as the kth vector
        int64_t j ;
        if (Ch == NULL)
        {
            // C will be constructed as sparse
            j = k ;
        }
        else
        {
            // C will be constructed as hypersparse
            j = Ch [k] ;
        }

        // vectors j in Ch are sorted, and in the range 0:n-1
        ASSERT (j >= 0 && j < n) ;
        ASSERT (j > jlast) ;
        jlast = j ;

        // see if A (:,j) exists
        if (C_to_A != NULL)
        {
            // A is hypersparse
            ASSERT (A_is_hyper) ;
            int64_t kA = C_to_A [k] ;
            ASSERT (kA >= -1 && kA < A->nvec) ;
            if (kA >= 0)
            {
                int64_t jA = A->h [kA] ;
                ASSERT (j == jA) ;
            }
        }
        else if (A_is_hyper)
        {
            // A is hypersparse, and Ch is a shallow copy of A->h
            ASSERT (Ch == A->h) ;
        }

        // see if B (:,j) exists
        if (C_to_B != NULL)
        {
            // B is hypersparse
            ASSERT (B_is_hyper) ;
            int64_t kB = C_to_B [k] ;
            ASSERT (kB >= -1 && kB < B->nvec) ;
            if (kB >= 0)
            {
                int64_t jB = B->h [kB] ;
                ASSERT (j == jB) ;
            }
        }
        else if (B_is_hyper)
        {
            // B is hypersparse, and Ch is a shallow copy of B->h
            ASSERT (Ch == B->h) ;
        }

        // see if M (:,j) exists
        if (Ch != NULL && M != NULL && Ch == M->h)
        {
            // Ch is the same as Mh
            ASSERT (M != NULL) ;
            ASSERT (M->h != NULL) ;
            ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
            ASSERT (C_to_M == NULL) ;
        }
        else if (C_to_M != NULL)
        {
            // M is present and hypersparse
            ASSERT (M != NULL) ;
            ASSERT (M->h != NULL) ;
            int64_t kM = C_to_M [k] ;
            ASSERT (kM >= -1 && kM < M->nvec) ;
            if (kM >= 0)
            {
                int64_t jM = M->h [kM] ;
                ASSERT (j == jM) ;
            }
        }
        else
        {
            // M is not present, or in sparse form
            ASSERT (M == NULL || M->h == NULL) ;
        }
    }
    #endif

    return (GrB_SUCCESS) ;
}
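// Illustration (a sketch, not library code): a subsequent emult phase
// typically consumes the phase0 results as below.  The identity-style
// fallbacks for the non-hypersparse cases are an assumption made for this
// sketch; the real templates handle each sparsity combination separately.

#if 0
for (int64_t k = 0 ; k < Cnvec ; k++)
{
    int64_t j  = (Ch != NULL) ? Ch [k] : k ;    // the kth vector of C is j
    int64_t kA = (C_to_A != NULL) ? C_to_A [k]  // A hypersparse, mapped
               : (A->h != NULL) ? k             // Ch is A->h itself
               : j ;                            // A sparse: vector j is at j
    int64_t kB = (C_to_B != NULL) ? C_to_B [k]
               : (B->h != NULL) ? k
               : j ;
    // kA == -1 or kB == -1 means A(:,j) or B(:,j) is empty, so C(:,j)
    // has no entries in the eWise multiply
}
#endif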
GB_binop__first_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_fc32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc32) // A*D function (colscale): GB (_AxD__first_fc32) // D*A function (rowscale): GB (_DxB__first_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__first_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__first_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc32) // C=scalar+B GB (_bind1st__first_fc32) // C=scalar+B' GB (_bind1st_tran__first_fc32) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_fc32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_fc32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" 
GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__first_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__first_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__first_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB (_bind1st_tran__first_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 
2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
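//------------------------------------------------------------------------------
// Illustration (not generated code, hence the #if 0 guard): the FIRST
// operator hard-coded in this file is z = f(x,y) = x, which is why GB_GETB
// expands to nothing and the bind1st kernel above simply broadcasts the
// scalar.  A scalar sketch, assuming GxB_FC32_t corresponds to a C99
// float complex:
//------------------------------------------------------------------------------

#if 0
#include <complex.h>

static float complex first_fc32 (float complex x, float complex y)
{
    (void) y ;          // FIRST ignores its second argument
    return (x) ;        // cij = aij
}
#endif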
openmp-ex34.c
/* ... and task parallelism */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
#pragma omp parallel
  {
    int id = omp_get_thread_num();

    /* exactly one thread of the team executes the single block and acts as
     * the dispatcher; the other threads wait at the implicit barrier of the
     * single construct and pick up the tasks it creates */
#pragma omp single
    {
      int i, count;
      printf("I am %d and I am the dispatcher\n", id);
      count = 0;
      for (i = 0; i < 20; i++) {
        /* spawn a task for roughly half of the iterations */
        if (rand() & 1) {
#pragma omp task
          {
            /* i and count are firstprivate in the task by default, so each
             * task prints the values they had at task creation */
            printf("Iteration %d spawned task %d, picked up by %d\n",
                   i, count, omp_get_thread_num());
          }
          count++;
        }
      }
    }
  }
  return 0;
}
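/*
 * Variant sketch (illustration only, hence the #if 0 guard): the dispatcher
 * above relies on the implicit barrier at the end of the parallel region
 * for task completion.  With "#pragma omp taskwait" the dispatching thread
 * can instead block until every task it created has finished, e.g. before
 * using their results.
 */
#if 0
static void dispatch_and_wait(void)
{
#pragma omp parallel
#pragma omp single
  {
    int i;
    for (i = 0; i < 20; i++) {
#pragma omp task
      printf("task %d run by thread %d\n", i, omp_get_thread_num());
    }
#pragma omp taskwait /* wait here for all tasks spawned above */
    printf("dispatcher: all tasks done\n");
  }
}
#endif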
depthwise_convolution_3x3.c
/* * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* CSI-NN2 version 1.12.x */ #include "csi_c906.h" #ifndef DWCONV3X3S1 #define DWCONV3X3S1 csi_c906_dwconv3x3s1 #endif #ifndef DWCONV3X3S2 #define DWCONV3X3S2 csi_c906_dwconv3x3s2 #endif /* (1) Algorithm works as follows: out_h2: out_h2_w8_loop --> out_h2_w4 --> out_h2_wtail out_h_tail: out_h1_w8_loop --> out_h1_w4 --> out_h1_wtail out_h2_w8: out_h2_w4: || out_h1_w8: out_h1_w4: outptr0[0-7]: outptr1[0-7]: outptr0[0-3]: outptr1[0-3] || outptr0[0-7]: outptr0[0-3]: k00 * r0[0-7] k00 * r1[0-7] k00 * r0[0-3] k00 * r1[0-3] || k00 * r0[0-7] k00 * r0[0-3] k01 * r0[1-8] k01 * r1[1-8] k01 * r0[1-4] k01 * r1[1-4] || k01 * r0[1-8] k01 * r0[1-4] k02 * r0[2-9] k02 * r1[2-9] k02 * r0[2-5] k02 * r1[2-5] || k02 * r0[2-9] k02 * r0[2-5] k10 * r1[0-7] k10 * r2[0-7] k10 * r1[0-3] k10 * r2[0-3] || k10 * r1[0-7] k10 * r1[0-3] k11 * r1[1-8] k11 * r2[1-8] k11 * r1[1-4] k11 * r2[1-4] || k11 * r1[1-8] k11 * r1[1-4] k12 * r1[2-9] k12 * r2[2-9] k12 * r1[2-5] k12 * r2[2-5] || k12 * r1[2-9] k12 * r1[2-5] k20 * r2[0-7] k20 * r3[0-7] k20 * r2[0-3] k20 * r3[0-3] || k20 * r2[0-7] k20 * r2[0-3] k21 * r2[1-8] k21 * r3[1-8] k21 * r2[1-4] k21 * r3[1-4] || k21 * r2[1-8] k21 * r2[1-4] k22 * r2[2-9] k22 * r3[2-9] k22 * r2[2-5] k22 * r3[2-5] || k22 * r2[2-9] k22 * r2[2-5] h2_w8_loop execution process: load r0[0-7] --> load r0[1-8] --> load r0[2-9] --> // Load r0[0-7] r0[1-8] r0[-9] before the loop to facilitate pipeline work --> load bias0[0-7] --> load r3[0-7] --> load bias1[0-7] --> load r3[1-8] --> k00*r0[0-7] / k20*r3[0-7] --> - - load r3[2-9] --> k01*r0[1-8] / k21*r3[1-8] --> load r1[0-7] --> k02*r0[2-9] / k22*r3[2-9] --> load r1[1-8] --> k10*r1[0-7] / k00*r1[0-7] --> - - load r1[2-9] --> k11*r1[1-8] / k01*r1[1-8] --> load r2[0-7] --> k12*r1[2-9] / k02*r1[2-9] --> load r2[1-8] --> k20*r2[0-7] / k10*r2[0-7] --> - - load r2[2-9] --> k21*r2[1-8] / k11*r2[1-8] --> load r0[0-7] --> k22*r2[2-9] / k12*r2[2-9] --> load r0[1-8] --> load r0[2-9] ---------------- - - ----------------------------------------------------------------------------------------------------------------------------------------------------------- h1_w8_loop execution process: load r0[0-7] --> load r0[1-8] --> load r0[2-9] --> --> load bias0[0-7] --> k00*r0[0-7] --> load r1[0-7] --> k01*r0[1-8] --> load r1[1-8] --> k02*r0[2-9] --> load r1[2-9] --> k10*r1[0-7] --> - - load r2[0-7] --> k11*r1[1-8] --> load r2[1-8] --> k12*r1[2-9] --> load r2[2-9] --> k20*r2[0-7] --> load r0[0-7] --> k21*r2[1-8] --> - - load r0[1-8] --> k22*r2[2-9] --> load r0[2-9] ------------------------------------------------------------------------------------------------- - - -------------------------------------------------------------------------------------------------------------------------------------------------------- (2) register definition: t0: i_out_h t1-t2: i_out_w v0-v1: bias0[0-7], output_data(acc) v2-v3: bias1[0-7], output_data(acc) 
v4-v9: r0 v4,v5:r0[0-7] v6,v7:r0[1-8] v8,v9:r0[2-9] v10-v15: r3 v16-v21: r1 v22-v27: r2 ft0-ft8: [ k00,k01,k02,k10,k11,k12,k20,k21,k22 ] ft11: constant float 0.0f, used by fusing relu (3) // TODO: support channel mult ?? opt padding */ int DWCONV3X3S1(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { float *input_data = (float *)input->data; float *output_data = (float *)output->data; float *kernel_data = (float *)kernel->data; float *bias_data = (float *)bias->data; int32_t batch = input->dim[0]; int32_t in_c = input->dim[1]; // group = in_channel int32_t in_h = input->dim[2]; int32_t in_w = input->dim[3]; int32_t out_c = output->dim[1]; int32_t out_h = output->dim[2]; int32_t out_w = output->dim[3]; float *input_padd_buf = (float *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) * (in_w + params->pad_left + params->pad_right) * sizeof(float)); csi_c906_pad_input(input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down, in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left); in_h = in_h + params->pad_top + params->pad_down; in_w = in_w + params->pad_left + params->pad_right; #pragma omp parallel for num_threads(1) for (int c = 0; c < in_c; c++) { float *out = output_data + c * out_h * out_w; float *outptr0 = out; float *outptr1 = outptr0 + out_w; const float bias0 = bias_data ? bias_data[c] : 0.0f; const float *img0 = input_padd_buf + c * in_h * in_w; const float *r0 = img0; const float *r1 = r0 + in_w; const float *r2 = r1 + in_w; const float *r3 = r2 + in_w; const float *kernel0 = kernel_data + c * 9; #if __riscv_vector == 128 asm volatile( "vsetvli zero, zero, e32, m2\n\t" #ifdef FUSE_CONV_RELU "fmv.w.x ft11, zero\n\t" #endif // FUSE_CONV_RELU "flw ft0, 0(%0)\n\t" // k00 "flw ft1, 4(%0)\n\t" // k01 "flw ft2, 8(%0)\n\t" // k02 "flw ft3, 12(%0)\n\t" // k10 "flw ft4, 16(%0)\n\t" // k11 "flw ft5, 20(%0)\n\t" // k12 "flw ft6, 24(%0)\n\t" // k20 "flw ft7, 28(%0)\n\t" // k21 "flw ft8, 32(%0)\n\t" // k22 "srai t0, %7, 1\n\t" // t0 = out_h >> 1 "beqz t0, 7f\n\t" "1:\n\t" // out_h_loop2 "srai t1, %8, 3\n\t" // t1 = out_w >> 3 "beqz t1, 3f\n\t" "vsetvli zero, zero, e32, m2\n\t" // set vl = 8 "vlw.v v4, (%1)\n\t" // r0[0-7] "addi %1, %1, 4\n\t" // r0++ "vlw.v v6, (%1)\n\t" // r0[1-8] "addi %1, %1, 4\n\t" // r0++ "vlw.v v8, (%1)\n\t" // r0[2-9] "2:\n\t" // out_w_loop8 "vfmv.v.f v0, %20\n\t" // bias0[0-7] "addi %1, %1, 24\n\t" // r0 += 6 "vlw.v v10, (%4)\n\t" // r3[0-7] "addi %4, %4, 4\n\t" // r3++ "vfmv.v.f v2, %20\n\t" // bias1[0-7] "vlw.v v12, (%4)\n\t" // r3[1-8] "addi %4, %4, 4\n\t" // r3++ "vfmacc.vf v0, ft0, v4\n\t" // k00 * r0[0-7] "vfmacc.vf v2, ft6, v10\n\t" // k20 * r3[0-7] "vlw.v v14, (%4)\n\t" // r3[2-9] "addi %4, %4, 24\n\t" // r3 += 6 "vfmacc.vf v0, ft1, v6\n\t" // k01 * r0[1-8] "vfmacc.vf v2, ft7, v12\n\t" // k21 * r3[1-8] "vlw.v v16, (%2)\n\t" // r1[0-7] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft2, v8\n\t" // k02 * r0[2-9] "vfmacc.vf v2, ft8, v14\n\t" // k22 * r3[2-9] "vlw.v v18, (%2)\n\t" // r1[1-8] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft3, v16\n\t" // k10 * r1[0-7] "vfmacc.vf v2, ft0, v16\n\t" // k00 * r1[0-7] "vlw.v v20, (%2)\n\t" // r1[2-9] "addi %2, %2, 24\n\t" // r1 += 6 "vfmacc.vf v0, ft4, v18\n\t" // k11 * r1[1-8] "vfmacc.vf v2, ft1, v18\n\t" // k01 * r1[1-8] "vlw.v v22, (%3)\n\t" // r2[0-7] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft5, v20\n\t" // k12 * r1[2-9] "vfmacc.vf v2, ft2, v20\n\t" // k02 * 
r1[2-9] "vlw.v v24, (%3)\n\t" // r2[1-8] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft6, v22\n\t" // k20 * r2[0-7] "vfmacc.vf v2, ft3, v22\n\t" // k10 * r2[0-7] "vlw.v v26, (%3)\n\t" // r2[2-9] "addi %3, %3, 24\n\t" // r2 += 6 "vfmacc.vf v0, ft7, v24\n\t" // k21 * r2[1-8] "vfmacc.vf v2, ft4, v24\n\t" // k11 * r2[1-8] "vlw.v v4, (%1)\n\t" // r0[0-7] load r0 for next loop "addi %1, %1, 4\n\t" // r0++ "vfmacc.vf v0, ft8, v26\n\t" // k22 * r2[2-9] "vlw.v v6, (%1)\n\t" // r0[1-8] "addi %1, %1, 4\n\t" // r0++ #ifdef FUSE_CONV_RELU "vfmax.vf v0, v0, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v0, (%5)\n\t" // store line0 8 elements on outptr0 "addi %5, %5, 32\n\t" // outptr0 += 8 "vfmacc.vf v2, ft5, v26\n\t" // k12 * r2[2-9] "vlw.v v8, (%1)\n\t" // r0[2-9] #ifdef FUSE_CONV_RELU "vfmax.vf v2, v2, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v2, (%6)\n\t" // store line1 8 elements on outptr1 "addi %6, %6, 32\n\t" // outptr1 += 8 "addi t1, t1, -1\n\t" "bnez t1, 2b\n\t" "addi %1, %1, -8\n\t" // r0 -= 2 ********* bump r0 to origin addr ************ "3:\n\t" // out_w4 // h2循环中只有执行一次的机会 "andi t1, %8, 7\n\t" // t1 = out_w & 7 "srai t2, t1, 2\n\t" // t2 = (out_w & 7) >> 2 "beqz t2, 4f\n\t" "vsetvli zero, zero, e32, m1\n\t" // set vl = 4 "vlw.v v4, (%1)\n\t" // r0[0-3] "addi %1, %1, 4\n\t" // r0++ "vfmv.v.f v0, %20\n\t" // bias0[0-3] "vlw.v v10, (%4)\n\t" // r3[0-3] "addi %4, %4, 4\n\t" // r3++ "vfmv.v.f v2, %20\n\t" // bias1[0-3] "vlw.v v5, (%1)\n\t" // r0[1-4] "addi %1, %1, 4\n\t" // r0++ "vlw.v v11, (%4)\n\t" // r3[1-4] "addi %4, %4, 4\n\t" // r3++ "vfmacc.vf v0, ft0, v4\n\t" // k00 * r0[0-3] "vfmacc.vf v2, ft6, v10\n\t" // k20 * r3[0-3] "vlw.v v6, (%1)\n\t" // r0[2-5] "addi %1, %1, 8\n\t" // r0 += 2 "vlw.v v12, (%4)\n\t" // r3[2-5] "addi %4, %4, 8\n\t" // r3 += 2 "vfmacc.vf v0, ft1, v5\n\t" // k01 * r0[1-4] "vfmacc.vf v2, ft7, v11\n\t" // k21 * r3[1-4] "vlw.v v16, (%2)\n\t" // r1[0-3] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft2, v6\n\t" // k02 * r0[2-5] "vfmacc.vf v2, ft8, v12\n\t" // k22 * r3[2-5] "vlw.v v17, (%2)\n\t" // r1[1-4] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft3, v16\n\t" // k10 * r1[0-3] "vfmacc.vf v2, ft0, v16\n\t" // k00 * r1[0-3] "vlw.v v18, (%2)\n\t" // r1[2-5] "addi %2, %2, 8\n\t" // r1 += 2 "vfmacc.vf v0, ft4, v17\n\t" // k11 * r1[1-4] "vfmacc.vf v2, ft1, v17\n\t" // k01 * r1[1-4] "vlw.v v22, (%3)\n\t" // r2[0-3] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft5, v18\n\t" // k12 * r1[2-5] "vfmacc.vf v2, ft2, v18\n\t" // k02 * r1[2-5]] "vlw.v v23, (%3)\n\t" // r2[1-4] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft6, v22\n\t" // k20 * r2[0-3] "vfmacc.vf v2, ft3, v22\n\t" // k10 * r2[0-3] "vlw.v v24, (%3)\n\t" // r2[2-5] "addi %3, %3, 8\n\t" // r2 += 2 "vfmacc.vf v0, ft7, v23\n\t" // k21 * r2[1-4] "vfmacc.vf v2, ft4, v23\n\t" // k11 * r2[1-4] "vfmacc.vf v0, ft8, v24\n\t" // k22 * r2[2-5] "vfmacc.vf v2, ft5, v24\n\t" // k12 * r2[2-5] #ifdef FUSE_CONV_RELU "vfmax.vf v0, v0, ft11\n\t" // **** relu **** "vfmax.vf v2, v2, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v0, (%5)\n\t" // store line0 4 elements on outptr0 "addi %5, %5, 16\n\t" // outptr0 += 4 "vsw.v v2, (%6)\n\t" // store line1 4 elements on outptr1 "addi %6, %6, 16\n\t" // outptr1 += 4 "4:\n\t" // out_w_tail "andi t2, t1, 3\n\t" // t2 = (out_w & 7) & 3 "beqz t2, 6f\n\t" "vfmv.v.f v0, %20\n\t" // bias0[0-3] / bias1[0-3] "li t5, 3\n\t" "vsetvli zero, t5, e32, m1\n\t" // set vl = 3 "vlw.v v5, (%0)\n\t" // k0 "addi %0, %0, 12\n\t" "vlw.v v6, (%0)\n\t" // k1 "addi %0, %0, 12\n\t" 
"vlw.v v7, (%0)\n\t" // k2 "5:\n\t" // out_w_tail "vlw.v v4, (%1)\n\t" // r0 "addi %1, %1, 4\n\t" // r0++ "vlw.v v16, (%2)\n\t" // r1 "addi %2, %2, 4\n\t" // r1++ "vlw.v v22, (%3)\n\t" // r2 "addi %3, %3, 4\n\t" // r2++ "vlw.v v10, (%4)\n\t" // r3 "addi %4, %4, 4\n\t" // r3++ "vfmul.vv v8, v4, v5\n\t" // r0 * k0 "vfmacc.vv v8, v16, v6\n\t" // += r1 * k1 "vfmacc.vv v8, v22, v7\n\t" // += r2 * k2 "vfredsum.vs v11, v8, v0\n\t" // v11[0] = v0[0] + sum(v8[0..2]) "vfmv.f.s ft9, v11\n\t" // ft9 = v11[0] "vfmul.vv v9, v16, v5\n\t" // r1 * k0 "vfmacc.vv v9, v22, v6\n\t" // += r2 * k1 "vfmacc.vv v9, v10, v7\n\t" // += r3 * k2 "vfredsum.vs v12, v9, v0\n\t" // v12[0] = v0[0] + sum(v9[0..2]) "vfmv.f.s ft10, v12\n\t" // ft10 = v12[0] #ifdef FUSE_CONV_RELU "fmax.s ft9, ft9, ft11\n\t" // **** relu **** "fmax.s ft10, ft10, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "fsw ft9, 0(%5)\n\t" "addi %5, %5, 4\n\t" "fsw ft10, 0(%6)\n\t" "addi %6, %6, 4\n\t" "addi t2, t2, -1\n\t" "bnez t2, 5b\n\t" "addi %0, %0, -24\n\t" // kernel -= 6 ********* bump kernel_data to origin addr ************ "6:\n\t" // out_h_loop2 cnt "slli t3, %9, 2\n\t" // in_w * 4 "addi t3, t3, 8\n\t" // in_w * 4 + 8 "slli t4, %8, 2\n\t" // out_w * 4 "add %1, %1, t3\n\t" // r0 += 2 + in_w "add %2, %2, t3\n\t" // r1 += 2 + in_w "add %3, %3, t3\n\t" // r2 += 2 + in_w "add %4, %4, t3\n\t" // r3 += 2 + in_w "add %5, %5, t4\n\t" // outptr0 += out_w "add %6, %6, t4\n\t" // outptr1 += out_w "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "7:\n\t" // out_h_tail // 只有执行一次的机会 "andi t0, %7, 1\n\t" // t0 = out_h & 1 "beqz t0, 12f\n\t" "srai t1, %8, 3\n\t" // t1 = out_w >> 3 "beqz t1, 9f\n\t" "vsetvli zero, zero, e32, m2\n\t" // set vl = 8 "vlw.v v4, (%1)\n\t" // r0[0-7] "addi %1, %1, 4\n\t" // r0++ "vlw.v v6, (%1)\n\t" // r0[1-8] "addi %1, %1, 4\n\t" // r0++ "vlw.v v8, (%1)\n\t" // r0[2-9] "8:\n\t" // out_w_loop8 (可以考虑用m1,指令更多,但是还可以再错开,便于流水?) 
"vfmv.v.f v0, %20\n\t" // bias0[0-7] "addi %1, %1, 24\n\t" // r0 += 6 "vfmacc.vf v0, ft0, v4\n\t" // k00 * r0[0-7] "vlw.v v16, (%2)\n\t" // r1[0-7] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft1, v6\n\t" // k01 * r0[1-8] "vlw.v v18, (%2)\n\t" // r1[1-8] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft2, v8\n\t" // k02 * r0[2-9] "vlw.v v20, (%2)\n\t" // r1[2-9] "addi %2, %2, 24\n\t" // r1 += 6 "vfmacc.vf v0, ft3, v16\n\t" // k10 * r1[0-7] "vlw.v v22, (%3)\n\t" // r2[0-7] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft4, v18\n\t" // k11 * r1[1-8] "vlw.v v24, (%3)\n\t" // r2[1-8] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft5, v20\n\t" // k12 * r1[2-9] "vlw.v v26, (%3)\n\t" // r2[2-9] "addi %3, %3, 24\n\t" // r2 += 6 "vfmacc.vf v0, ft6, v22\n\t" // k20 * r2[0-7] "vlw.v v4, (%1)\n\t" // r0[0-7] "addi %1, %1, 4\n\t" // r0++ "vfmacc.vf v0, ft7, v24\n\t" // k21 * r2[1-8] "vlw.v v6, (%1)\n\t" // r0[1-8] "addi %1, %1, 4\n\t" // r0++ "vfmacc.vf v0, ft8, v26\n\t" // k22 * r2[2-9] "vlw.v v8, (%1)\n\t" // r0[2-9] #ifdef FUSE_CONV_RELU "vfmax.vf v0, v0, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v0, (%5)\n\t" // store line0 8 elements on outptr0 "addi %5, %5, 32\n\t" // outptr0 += 8 "addi t1, t1, -1\n\t" "bnez t1, 8b\n\t" "addi %1, %1, -8\n\t" // r0 -= 8 ********* bump r0 to origin addr ************ "9:\n\t" // out_w4 "andi t1, %8, 7\n\t" // t1 = out_w & 7 "srai t2, t1, 2\n\t" // t2 = (out_w & 7) >> 2 "beqz t2, 10f\n\t" "vsetvli zero, zero, e32, m1\n\t" // set vl = 4 "vlw.v v4, (%1)\n\t" // r0[0-3] "addi %1, %1, 4\n\t" // r0++ "vfmv.v.f v0, %20\n\t" // bias0[0-3] "vlw.v v5, (%1)\n\t" // r0[1-4] "addi %1, %1, 4\n\t" // r0++ "vfmacc.vf v0, ft0, v4\n\t" // k00 * r0[0-3] "vlw.v v6, (%1)\n\t" // r0[2-5] "addi %1, %1, 8\n\t" // r0 += 2 "vfmacc.vf v0, ft1, v5\n\t" // k01 * r0[1-4] "vlw.v v16, (%2)\n\t" // r1[0-3] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft2, v6\n\t" // k02 * r0[2-5] "vlw.v v17, (%2)\n\t" // r1[1-4] "addi %2, %2, 4\n\t" // r1++ "vfmacc.vf v0, ft3, v16\n\t" // k10 * r1[0-3] "vlw.v v18, (%2)\n\t" // r1[2-5] "addi %2, %2, 8\n\t" // r1 += 2 "vfmacc.vf v0, ft4, v17\n\t" // k11 * r1[1-4] "vlw.v v22, (%3)\n\t" // r2[0-3] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft5, v18\n\t" // k12 * r1[2-5] "vlw.v v23, (%3)\n\t" // r2[1-4] "addi %3, %3, 4\n\t" // r2++ "vfmacc.vf v0, ft6, v22\n\t" // k20 * r2[0-3] "vlw.v v24, (%3)\n\t" // r2[2-5] "addi %3, %3, 8\n\t" // r2 += 2 "vfmacc.vf v0, ft7, v23\n\t" // k21 * r2[1-4] "vfmacc.vf v0, ft8, v24\n\t" // k22 * r2[2-5] #ifdef FUSE_CONV_RELU "vfmax.vf v0, v0, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v0, (%5)\n\t" // store line0 4 elements on outptr0 "addi %5, %5, 16\n\t" // outptr0 += 4 "10:\n\t" // out_w_tail "andi t2, t1, 3\n\t" "beqz t2, 12f\n\t" "vfmv.v.f v0, %20\n\t" // bias0[0-3] "li t5, 3\n\t" "vsetvli zero, t5, e32, m1\n\t" // set vl = 3 "vlw.v v5, (%0)\n\t" // k0 "addi %0, %0, 12\n\t" "vlw.v v6, (%0)\n\t" // k1 "addi %0, %0, 12\n\t" "vlw.v v7, (%0)\n\t" // k2 "11:\n\t" // out_w_tail "vlw.v v4, (%1)\n\t" // r0 "addi %1, %1, 4\n\t" // r0++ "vlw.v v16, (%2)\n\t" // r1 "addi %2, %2, 4\n\t" // r1++ "vlw.v v22, (%3)\n\t" // r2 "addi %3, %3, 4\n\t" // r2++ "vfmul.vv v8, v4, v5\n\t" // r0 * k0 "vfmacc.vv v8, v16, v6\n\t" // += r1 * k1 "vfmacc.vv v8, v22, v7\n\t" // += r2 * k2 "vfredsum.vs v11, v8, v0\n\t" // v11[0] = v0[0] + sum(v8[0..2]) "vfmv.f.s ft9, v11\n\t" // ft9 = v11[0] #ifdef FUSE_CONV_RELU "fmax.s ft9, ft9, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "fsw ft9, 0(%5)\n\t" "addi %5, %5, 4\n\t" "addi t2, t2, 
-1\n\t" "bnez t2, 11b\n\t" "12:\n\t" // updata addr "addi %1, %1, 8\n\t" // r0 += 2 "addi %2, %2, 8\n\t" // r1 += 2 "addi %3, %3, 8\n\t" // r2 += 2 :"=r"(kernel0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(outptr0), // %5 "=r"(outptr1), // %6 "=r"(out_h), // %7 "=r"(out_w), // %8 "=r"(in_w) // %9 :"0"(kernel0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(outptr0), "6"(outptr1), "7"(out_h), "8"(out_w), "9"(in_w), "f"(bias0) // %20 :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "ft8", "ft9", "ft10", "ft11", "t0", "t1", "t2", "t3", "t4", "t5" ); } #else const float *k0 = kernel0; const float *k1 = k0 + 3; const float *k2 = k1 + 3; int h = 0; for (; h + 1 < out_h; h += 2) { for (int w = 0; w < out_w; w++) { float sum0 = bias0; float sum1 = bias0; sum0 += r0[0] * k0[0] + r0[1] * k0[1] + r0[2] * k0[2]; sum0 += r1[0] * k1[0] + r1[1] * k1[1] + r1[2] * k1[2]; sum1 += r1[0] * k0[0] + r1[1] * k0[1] + r1[2] * k0[2]; sum0 += r2[0] * k2[0] + r2[1] * k2[1] + r2[2] * k2[2]; sum1 += r2[0] * k1[0] + r2[1] * k1[1] + r2[2] * k1[2]; sum1 += r3[0] * k2[0] + r3[1] * k2[1] + r3[2] * k2[2]; #ifdef FUSE_CONV_RELU sum0 = sum0 > 0 ? sum0 : 0; sum1 = sum1 > 0 ? sum1 : 0; #endif // FUSE_CONV_RELU *outptr0 = sum0; *outptr1 = sum1; r0++; r1++; r2++; r3++; outptr0++; outptr1++; } r0 += 2 + in_w; // jump to next line r1 += 2 + in_w; r2 += 2 + in_w; r3 += 2 + in_w; outptr0 += out_w; outptr1 += out_w; } for (; h < out_h; h++) { for (int w = 0; w < out_w; w++) { float sum0 = bias0; sum0 += r0[0] * k0[0] + r0[1] * k0[1] + r0[2] * k0[2]; sum0 += r1[0] * k1[0] + r1[1] * k1[1] + r1[2] * k1[2]; sum0 += r2[0] * k2[0] + r2[1] * k2[1] + r2[2] * k2[2]; #ifdef FUSE_CONV_RELU sum0 = sum0 > 0 ? sum0 : 0; #endif // FUSE_CONV_RELU *outptr0 = sum0; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } } #endif // __riscv_vector csi_mem_free(input_padd_buf); return CSINN_TRUE; } /* (1) Algorithm works as follows: out_h1_loop: out_w4_loop --> out_w_tail k00*r00 k00*r02 k00*r04 k00*r06 k01*r01 k01*r03 k01*r05 k01*r07 k02*r02 k02*r04 k02*r06 k02*r08 ---------------------------------------- k10*r10 k10*r12 k10*r14 k10*r16 k11*r11 k11*r13 k11*r15 k11*r17 k12*r12 k12*r14 k12*r16 k12*r18 ---------------------------------------- k20*r20 k20*r22 k20*r24 k20*r26 k21*r21 k21*r23 k21*r25 k21*r27 k22*r22 k22*r24 k22*r26 k22*r28 计算 k * r 时可以用 .vv 也可以用 .vf (2) register definition: t0: i_out_h loop cnt t1-t2: i_out_w loop cnt t3: load stride 2 for r0-r2 t4: constant 3 for setting vl = 3 ft0: hold 1 output data ft1-ft9: [ k00, k01, k02, k10, k11, k12, k20, k21, k22 ] ft11: constant float 0.0f, used by fusing relu v0: bias, acc v4-v5: r0[0,2.4.6] r0[1,3,5,7] v1: r0[2,4,6,8] v6-v7: r1[0,2.4.6] r1[1,3,5,7] v2: r1[2,4,6,8] v8-v9: r2[0,2.4.6] r2[1,3,5,7] v3: r2[2,4,6,8] v10-v12: k0, k1, k2 v20-v21: [ acc(kx1*rx), acc(kx2*rx) ] (3) //TODO: support channel mult ?? 
Staggered instructions */ int DWCONV3X3S2(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { float *input_data = (float *)input->data; float *output_data = (float *)output->data; float *kernel_data = (float *)kernel->data; float *bias_data = (float *)bias->data; int32_t batch = input->dim[0]; int32_t in_c = input->dim[1]; // group = in_channel int32_t in_h = input->dim[2]; int32_t in_w = input->dim[3]; int32_t out_c = output->dim[1]; int32_t out_h = output->dim[2]; int32_t out_w = output->dim[3]; float *input_padd_buf = (float *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) * (in_w + params->pad_left + params->pad_right) * sizeof(float)); csi_c906_pad_input(input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down, in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left); in_h = in_h + params->pad_top + params->pad_down; in_w = in_w + params->pad_left + params->pad_right; int tailstep = in_w - 2 * out_w + in_w; #pragma omp parallel for num_threads(1) for (int c = 0; c < in_c; c++) { float *out = output_data + c * out_h * out_w; float *outptr0 = out; const float bias0 = bias_data ? bias_data[c] : 0.0f; const float *img0 = input_padd_buf + c * in_h * in_w; const float *r0 = img0; const float *r1 = r0 + in_w; const float *r2 = r1 + in_w; const float *kernel0 = kernel_data + c * 9; #if __riscv_vector == 128 asm volatile( "vsetvli zero, zero, e32, m1\n\t" "li t3, 8\n\t" // load stride for r_x #ifdef FUSE_CONV_RELU "fmv.w.x ft11, zero\n\t" #endif // FUSE_CONV_RELU "flw ft1, (%0)\n\t" "flw ft2, 4(%0)\n\t" "flw ft3, 8(%0)\n\t" "flw ft4, 12(%0)\n\t" "flw ft5, 16(%0)\n\t" "flw ft6, 20(%0)\n\t" "flw ft7, 24(%0)\n\t" "flw ft8, 28(%0)\n\t" "flw ft9, 32(%0)\n\t" // load k00 - k22 "vlw.v v10, (%0)\n\t" // k0 "addi %0, %0, 12\n\t" "vlw.v v11, (%0)\n\t" // k1 "addi %0, %0, 12\n\t" "vlw.v v12, (%0)\n\t" // k2 "vfmv.v.f v0, %16\n\t" // bias0 "mv t0, %5\n\t" // i_out_h = out_h "1:\n\t" // out_h "srai t1, %6, 2\n\t" // t1 = out_w >> 2 "beqz t1, 3f\n\t" "vsetvli zero, zero, e32, m1\n\t" // pre-load rxx "vlseg2e.v v4, (%1)\n\t" // v4[0..3] = r0[0,2.4.6] v5[0..3] = r0[1,3,5,7] "addi %1, %1, 8\n\t" // r0 += 2 "vlsw.v v1, (%1), t3\n\t" // r0[2,4,6,8] "addi %1, %1, 24\n\t" "2:\n\t" // out_w_loop4 "vlseg2e.v v6, (%2)\n\t" // v6[0..3] = r1[0,2.4.6] v7[0..3] = r1[1,3,5,7] "addi %2, %2, 8\n\t" "vfmul.vf v20, v4, ft1\n\t" // = k00 * r0[0,2,4,6] "vfmul.vf v21, v5, ft2\n\t" // = k01 * r0[1,3,5,7] "vlsw.v v2, (%2), t3\n\t" "addi %2, %2, 24\n\t" "vfmacc.vf v0, ft3, v1\n\t" // += k02 * r0[2,4,6,8] "vlseg2e.v v8, (%3)\n\t" // v8[0..3] = r2[0,2.4.6] v9[0..3] = r2[1,3,5,7] "addi %3, %3, 8\n\t" "vfmacc.vf v20, ft4, v6\n\t" // += k10 * r1[0,2,4,6] "vfmacc.vf v21, ft5, v7\n\t" // += k11 * r1[1,3,5,7] "vlsw.v v3, (%3), t3\n\t" "addi %3, %3, 24\n\t" "vfmacc.vf v0, ft6, v2\n\t" // += k12 * r1[2,4,6,8] "vlseg2e.v v4, (%1)\n\t" // v4[0..3] = r0[0,2.4.6] v5[0..3] = r0[1,3,5,7] "addi %1, %1, 8\n\t" // r0 += 2 "vfmacc.vf v20, ft7, v8\n\t" // += k20 * r2[0,2,4,6] "vfmacc.vf v21, ft8, v9\n\t" // += k21 * r2[1,3,5,7] "vlsw.v v1, (%1), t3\n\t" // r0[2,4,6,8] "addi %1, %1, 24\n\t" "vfmacc.vf v0, ft9, v3\n\t" // += k22 * r2[2,4,6,8] "vfadd.vv v2, v20, v21\n\t" "vfadd.vv v0, v0, v2\n\t" #ifdef FUSE_CONV_RELU "vfmax.vf v0, v0, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "vsw.v v0, (%4)\n\t" "addi %4, %4, 16\n\t" // outptr += 16 "vfmv.v.f v0, %16\n\t" // bias0 "addi t1, t1, -1\n\t" 
"bnez t1, 2b\n\t" "addi %1, %1, -32\n\t" // r0 -= 8 ********* bump r0 to origin addr ************ "3:\n\t" // out_w_tail "andi t2, %6, 3\n\t" // t2 = out_w & 3 "beqz t2, 5f\n\t" "4:\n\t" // out_w_tail "vlw.v v4, (%1)\n\t" // r0 "addi %1, %1, 8\n\t" "vlw.v v6, (%2)\n\t" // r1 "addi %2, %2, 8\n\t" "vlw.v v8, (%3)\n\t" // r2 "addi %3, %3, 8\n\t" "vfmul.vv v20, v4, v10\n\t" // r0 * k0 "vfmacc.vv v20, v6, v11\n\t" // += r1 * k1 "vfmacc.vv v20, v8, v12\n\t" // += r2 * k2 "li t4, 3\n\t" "vsetvli zero, t4, e32, m1\n\t" // set vl = 3 "vfredsum.vs v21, v20, v0\n\t" // v21[0] = v0[0](bias) + sum(v20[0..2]) "vfmv.f.s ft0, v21\n\t" // ft0 = v21[0] #ifdef FUSE_CONV_RELU "fmax.s ft0, ft0, ft11\n\t" // **** relu **** #endif // FUSE_CONV_RELU "fsw ft0, 0(%4)\n\t" "addi %4, %4, 4\n\t" // bump output_data pointer "addi t2, t2, -1\n\t" "bnez t2, 4b\n\t" "5:\n\t" "slli t2, %7, 2\n\t" // t2 = tailstep * 4 "add %1, %1, t2\n\t" "add %2, %2, t2\n\t" "add %3, %3, t2\n\t" // r0/r1/r2 += tailstep "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" :"=r"(kernel0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(outptr0), // %4 "=r"(out_h), // %5 "=r"(out_w), // %6 "=r"(tailstep) // %7 :"0"(kernel0), "1"(r0), "2"(r1), "3"(r2), "4"(outptr0), "5"(out_h), "6"(out_w), "7"(tailstep), "f"(bias0) // %16 :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v20", "v21", "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", "ft8", "ft9", "ft11", "t0", "t1", "t2", "t3", "t4" ); } #else const float *k0 = kernel0; const float *k1 = k0 + 3; const float *k2 = k1 + 3; int h = 0; for (; h < out_h; h++) { for (int w = 0; w < out_w; w++) { float sum0 = bias0; sum0 += r0[0] * k0[0] + r0[1] * k0[1] + r0[2] * k0[2]; sum0 += r1[0] * k1[0] + r1[1] * k1[1] + r1[2] * k1[2]; sum0 += r2[0] * k2[0] + r2[1] * k2[1] + r2[2] * k2[2]; #ifdef FUSE_CONV_RELU sum0 = sum0 > 0 ? sum0 : 0; #endif // FUSE_CONV_RELU *outptr0 = sum0; r0 += 2; r1 += 2; r2 += 2; outptr0++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } #endif // __riscv_vector csi_mem_free(input_padd_buf); return CSINN_TRUE; }
GB_reduce_each_index.c
//------------------------------------------------------------------------------ // GB_reduce_each_index: T(i)=reduce(A(i,:)), reduce a matrix to a vector //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Reduce a matrix to a vector. All entries in A(i,:) are reduced to T(i). // First, all threads reduce their slice to their own workspace, operating on // roughly the same number of entries each. The vectors in A are ignored; the // reduction only depends on the indices. Next, the threads cooperate to // reduce all workspaces to the workspace of thread 0. Finally, this last // workspace is collected into T. // If an out-of-memory condition occurs, the macro GB_FREE_ALL frees any // workspace. This has no effect on the built-in workers (GB_FREE_ALL does // nothing), and the workspace is freed in the caller. For the generic worker, // the GB_FREE_ALL macro defined in GB_reduce_to_vector frees all workspace. { //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ; const int64_t *GB_RESTRICT Ai = A->i ; const int64_t n = A->vlen ; size_t zsize = ttype->size ; //-------------------------------------------------------------------------- // allocate workspace for each thread //-------------------------------------------------------------------------- int ntasks = 256 * nthreads ; ntasks = GB_IMIN (ntasks, n) ; GB_void *GB_RESTRICT *Works = GB_CALLOC (nth, GB_void *) ; bool *GB_RESTRICT *Marks = GB_CALLOC (nth, bool *) ; int64_t *GB_RESTRICT Tnz = GB_CALLOC (nth, int64_t) ; int64_t *GB_RESTRICT Count = GB_CALLOC (ntasks+1, int64_t) ; bool ok = (Works != NULL && Marks != NULL && Tnz != NULL && Count != NULL) ; // This does not need to be parallel. The calloc does not take O(n) time. 
if (ok) { for (int tid = 0 ; tid < nth ; tid++) { Works [tid] = GB_MALLOC (n * zsize, GB_void) ; Marks [tid] = GB_CALLOC (n, bool) ; ok = ok && (Works [tid] != NULL && Marks [tid] != NULL) ; } } if (!ok) { // out of memory if (Works != NULL) { for (int tid = 0 ; tid < nth ; tid++) { GB_FREE (Works [tid]) ; } } if (Marks != NULL) { for (int tid = 0 ; tid < nth ; tid++) { GB_FREE (Marks [tid]) ; } } GB_FREE (Works) ; GB_FREE (Marks) ; GB_FREE (Tnz) ; GB_FREE (Count) ; GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // reduce each slice in its own workspace //-------------------------------------------------------------------------- // each thread reduces its own slice in parallel int tid ; #pragma omp parallel for num_threads(nth) schedule(static) for (tid = 0 ; tid < nth ; tid++) { //---------------------------------------------------------------------- // get the workspace for this thread //---------------------------------------------------------------------- GB_CTYPE *GB_RESTRICT Work = (GB_CTYPE *) Works [tid] ; bool *GB_RESTRICT Mark = Marks [tid] ; int64_t my_tnz = 0 ; //---------------------------------------------------------------------- // reduce the entries //---------------------------------------------------------------------- for (int64_t p = pstart_slice [tid] ; p < pstart_slice [tid+1] ;p++) { int64_t i = Ai [p] ; // ztype aij = (ztype) Ax [p], with typecast GB_SCALAR (aij) ; GB_CAST_ARRAY_TO_SCALAR (aij, Ax, p) ; if (!Mark [i]) { // first time index i has been seen // Work [i] = aij ; no typecast GB_COPY_SCALAR_TO_ARRAY (Work, i, aij) ; Mark [i] = true ; my_tnz++ ; } else { // Work [i] += aij ; no typecast GB_ADD_SCALAR_TO_ARRAY (Work, i, aij) ; } } Tnz [tid] = my_tnz ; } //-------------------------------------------------------------------------- // reduce all workspace to Work [0] and count # entries in T //-------------------------------------------------------------------------- GB_CTYPE *GB_RESTRICT Work0 = (GB_CTYPE *) Works [0] ; bool *GB_RESTRICT Mark0 = Marks [0] ; int64_t tnz = Tnz [0] ; if (nth > 1) { int64_t i ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:tnz) for (i = 0 ; i < n ; i++) { for (int tid = 1 ; tid < nth ; tid++) { const bool *GB_RESTRICT Mark = Marks [tid] ; if (Mark [i]) { // thread tid has a contribution to index i const GB_CTYPE *GB_RESTRICT Work = (GB_CTYPE *) Works [tid]; if (!Mark0 [i]) { // first time index i has been seen // Work0 [i] = Work [i] ; no typecast GB_COPY_ARRAY_TO_ARRAY (Work0, i, Work, i) ; Mark0 [i] = true ; tnz++ ; } else { // Work0 [i] += Work [i] ; no typecast GB_ADD_ARRAY_TO_ARRAY (Work0, i, Work, i) ; } } } } // free all but workspace for thread 0 for (int tid = 1 ; tid < nth ; tid++) { GB_FREE (Works [tid]) ; GB_FREE (Marks [tid]) ; } } //-------------------------------------------------------------------------- // free workspace //-------------------------------------------------------------------------- GB_FREE (Works) ; GB_FREE (Marks) ; GB_FREE (Tnz) ; //-------------------------------------------------------------------------- // allocate T //-------------------------------------------------------------------------- // since T is a GrB_Vector, it is CSC and not hypersparse info = GB_create (&T, ttype, n, 1, GB_Ap_calloc, true, GB_FORCE_NONHYPER, GB_HYPER_DEFAULT, 1, tnz, true, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE (Work0) ; GB_FREE (Mark0) ; GB_FREE (Count) ; GB_FREE_ALL ; return 
(GB_OUT_OF_MEMORY) ; } T->p [0] = 0 ; T->p [1] = tnz ; int64_t *GB_RESTRICT Ti = T->i ; GB_CTYPE *GB_RESTRICT Tx = (GB_CTYPE *) T->x ; T->nvec_nonempty = (tnz > 0) ? 1 : 0 ; //-------------------------------------------------------------------------- // gather the results into T //-------------------------------------------------------------------------- if (tnz == n) { //---------------------------------------------------------------------- // T is dense: transplant Work0 into T->x //---------------------------------------------------------------------- int64_t i ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (i = 0 ; i < n ; i++) { Ti [i] = i ; } GB_FREE (T->x) ; T->x = Work0 ; Work0 = NULL ; } else { //---------------------------------------------------------------------- // T is sparse: gather from Work0 and Mark0 //---------------------------------------------------------------------- if (nthreads == 1) { //------------------------------------------------------------------ // gather sparse T using a single thread //------------------------------------------------------------------ int64_t p = 0 ; for (int64_t i = 0 ; i < n ; i++) { if (Mark0 [i]) { Ti [p] = i ; // Tx [p] = Work0 [i], no typecast GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ; p++ ; } } ASSERT (p == tnz) ; } else { //------------------------------------------------------------------ // gather sparse T using multiple threads //------------------------------------------------------------------ // Some tasks may be completely empty and thus take no time at all; // 256 tasks per thread are created for better load balancing. int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t ifirst, ilast, p = 0 ; GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ; for (int64_t i = ifirst ; i < ilast ; i++) { p += Mark0 [i] ; } Count [taskid] = p ; } GB_cumsum (Count, ntasks, NULL, 1) ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic) for (taskid = 0 ; taskid < ntasks ; taskid++) { int64_t ifirst, ilast, p = Count [taskid] ; int64_t my_count = (Count [taskid+1] - p) ; GB_PARTITION (ifirst, ilast, n, taskid, ntasks) ; if (my_count > 0) { for (int64_t i = ifirst ; i < ilast ; i++) { if (Mark0 [i]) { Ti [p] = i ; // Tx [p] = Work0 [i], no typecast GB_COPY_ARRAY_TO_ARRAY (Tx, p, Work0, i) ; p++ ; } } } } #ifdef GB_DEBUG // check result using a single thread int64_t p = 0 ; for (int64_t i = 0 ; i < n ; i++) { if (Mark0 [i]) { ASSERT (Ti [p] == i) ; p++ ; } } ASSERT (p == tnz) ; #endif } } //-------------------------------------------------------------------------- // free workspace //-------------------------------------------------------------------------- GB_FREE (Count) ; GB_FREE (Work0) ; GB_FREE (Mark0) ; }
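The worker above is a type- and operator-generic template. A self-contained sketch of the same two-phase pattern, specialized to double with plus as the monoid, may make the control flow easier to follow; the names are illustrative, Work0/Mark0 are assumed allocated and zeroed by the caller, and allocation checks are omitted (the real code above handles out-of-memory).

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>

/* phase 1: each thread reduces its slice of (Ai, Ax) into its own
   workspace; phase 2: the workspaces are merged into Work0/Mark0 by index */
void reduce_each_index_sketch(const int64_t *Ai, const double *Ax,
                              int64_t nz, int64_t n, int nth,
                              double *Work0, bool *Mark0)
{
    double **Works = malloc(nth * sizeof(double *));
    bool   **Marks = malloc(nth * sizeof(bool *));
    Works[0] = Work0;
    Marks[0] = Mark0;
    for (int t = 1; t < nth; t++) {
        Works[t] = malloc(n * sizeof(double));
        Marks[t] = calloc(n, sizeof(bool));
    }

    #pragma omp parallel for num_threads(nth) schedule(static)
    for (int t = 0; t < nth; t++) {
        int64_t pstart = (nz * t) / nth;        /* slice by entry count */
        int64_t pend   = (nz * (t + 1)) / nth;
        for (int64_t p = pstart; p < pend; p++) {
            int64_t i = Ai[p];
            if (!Marks[t][i]) { Works[t][i]  = Ax[p]; Marks[t][i] = true; }
            else              { Works[t][i] += Ax[p]; }
        }
    }

    #pragma omp parallel for num_threads(nth) schedule(static)
    for (int64_t i = 0; i < n; i++) {
        for (int t = 1; t < nth; t++) {
            if (Marks[t][i]) {
                if (!Mark0[i]) { Work0[i]  = Works[t][i]; Mark0[i] = true; }
                else           { Work0[i] += Works[t][i]; }
            }
        }
    }

    for (int t = 1; t < nth; t++) { free(Works[t]); free(Marks[t]); }
    free(Works);
    free(Marks);
}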
DRB084-threadprivatemissing-orig-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
A file-scope variable is used within a function called by a parallel region.
threadprivate is not used, so the accesses to sum0 race.
Data race pairs  sum0@61:3 vs. sum0@61:8
                 sum0@61:3 vs. sum0@61:3
*/

#include <stdio.h>
#include <assert.h>

int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)

void foo (int i)
{
  sum0=sum0+i;
}

int main()
{
  int i, sum=0;
#pragma omp parallel for
  for (i=1;i<=1000;i++)
  {
    foo (i);
  }
  sum=sum+sum0;

  /* reference calculation */
  for (i=1;i<=1000;i++)
  {
    sum1=sum1+i;
  }

  printf("sum=%d; sum1=%d\n",sum,sum1);
//  assert(sum==sum1);
  return 0;
}
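The race documented above disappears if the commented-out threadprivate directive is enabled and the per-thread copies are combined explicitly. A hedged sketch of that repair follows; it is separate from the benchmark file (which intentionally keeps the race), and the names psum0/foo_private/main_fixed are illustrative.

#include <stdio.h>

int psum0 = 0;                      /* per-thread copy of the accumulator */
#pragma omp threadprivate(psum0)

void foo_private(int i) { psum0 = psum0 + i; }

int main_fixed(void)
{
  int i, sum = 0;
#pragma omp parallel copyin(psum0)
  {
#pragma omp for
    for (i = 1; i <= 1000; i++) foo_private(i);
#pragma omp critical
    sum = sum + psum0;              /* combine the per-thread partials */
  }
  printf("sum=%d\n", sum);          /* 500500 */
  return 0;
}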
seq_kmeans.c
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*   File:         seq_kmeans.c  (sequential version)                        */
/*   Description:  Implementation of simple k-means clustering algorithm     */
/*                 This program takes an array of N data objects, each with  */
/*                 M coordinates and performs a k-means clustering given a   */
/*                 user-provided value of the number of clusters (K).  The   */
/*                 clustering results are saved in 2 arrays:                 */
/*                 1. a returned array of size [K][N] indicating the center  */
/*                    coordinates of K clusters                              */
/*                 2. membership[N] stores the cluster center ids, each      */
/*                    corresponding to the cluster a data object is assigned */
/*                                                                           */
/*   Author:  Wei-keng Liao                                                  */
/*            ECE Department, Northwestern University                        */
/*            email: wkliao@ece.northwestern.edu                             */
/*                                                                           */
/*   Copyright (C) 2005, Northwestern University                             */
/*   See COPYRIGHT notice in top-level directory.                            */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>   /* needed for the assert() calls below */
#include <omp.h>
#include "kmeans.h"

/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points            */
__inline static
float euclid_dist_2(int    numdims,  /* no. dimensions */
                    float *coord1,   /* [numdims] */
                    float *coord2)   /* [numdims] */
{
    int i;
    float ans=0.0;

    for (i=0; i<numdims; i++)
        ans += (coord1[i]-coord2[i]) * (coord1[i]-coord2[i]);

    return(ans);
}

/*----< find_nearest_cluster() >---------------------------------------------*/
__inline static
int find_nearest_cluster(int     numClusters, /* no. clusters */
                         int     numCoords,   /* no. coordinates */
                         float  *object,      /* [numCoords] */
                         float **clusters)    /* [numClusters][numCoords] */
{
    int   index, i;
    float dist, min_dist;

    /* find the cluster id that has min distance to object */
    index    = 0;
    min_dist = euclid_dist_2(numCoords, object, clusters[0]);

    for (i=1; i<numClusters; i++) {
        dist = euclid_dist_2(numCoords, object, clusters[i]);
        /* no need for the square root */
        if (dist < min_dist) { /* find the min and its array index */
            min_dist = dist;
            index    = i;
        }
    }
    return(index);
}

/*----< seq_kmeans() >-------------------------------------------------------*/
/* return an array of cluster centers of size [numClusters][numCoords]       */
int seq_kmeans(float **objects,      /* in: [numObjs][numCoords] */
               int     numCoords,    /* no. features */
               int     numObjs,      /* no. objects */
               int     numClusters,  /* no. clusters */
               float   threshold,    /* % objects change membership */
               int    *membership,   /* out: [numObjs] */
               float **clusters)     /* out: [numClusters][numCoords] */
{
    int      i, j, index, loop=0;
    int     *newClusterSize; /* [numClusters]: no. objects assigned in each
                                new cluster */
    float    delta;          /* % of objects change their clusters */
    float  **newClusters;    /* [numClusters][numCoords] */

    /* need to initialize newClusterSize and newClusters[0] to all 0 */
    newClusterSize = (int*) calloc(numClusters, sizeof(int));
    assert(newClusterSize != NULL);
    newClusters    = (float**) malloc(numClusters *            sizeof(float*));
    assert(newClusters != NULL);
    newClusters[0] = (float*)  calloc(numClusters * numCoords, sizeof(float));
    assert(newClusters[0] != NULL);

#pragma omp parallel
    {
        /* initialize membership[] */
#pragma omp for nowait schedule(static)
        for (i=0; i<numObjs; i++) membership[i] = -1;

        /* The address of each cluster differs from the previous one by
           numCoords. That means that each cluster differs from
           newClusters[0] by i*numCoords. This change makes the following
           for loop free of data dependencies. */
#pragma omp for nowait schedule(static)
        for (i=1; i<numClusters; i++) {
            newClusters[i] = newClusters[0] + i*numCoords;
        }
    }

    do {
        delta = 0.0;
#pragma omp parallel private(index, j)
        {
            /* reduction(+:delta) avoids a data race of unsynchronized
               updates to the shared delta */
#pragma omp for schedule(dynamic, 1) reduction(+:delta)
            for (i=0; i<numObjs; i++) {
                /* find the array index of the nearest cluster center */
                index = find_nearest_cluster(numClusters, numCoords,
                                             objects[i], clusters);

                /* if membership changes, increase delta by 1 */
                if (membership[i] != index) delta += 1.0;

                /* assign the membership to object i */
                membership[i] = index;

                /* update new cluster center : sum of objects located within */
#pragma omp atomic
                newClusterSize[index]++;
                for (j=0; j<numCoords; j++) {
#pragma omp atomic
                    newClusters[index][j] += objects[i][j];
                }
            }

            /* average the sum and replace old cluster center with newClusters */
#pragma omp for schedule(static)
            for (i=0; i<numClusters; i++) {
                for (j=0; j<numCoords; j++) {
                    if (newClusterSize[i] > 0)
                        clusters[i][j] = newClusters[i][j] / newClusterSize[i];
                    newClusters[i][j] = 0.0;   /* set back to 0 */
                }
                newClusterSize[i] = 0;   /* set back to 0 */
            }
        }
        delta /= numObjs;
    } while (delta > threshold && loop++ < 500);

    free(newClusters[0]);
    free(newClusters);
    free(newClusterSize);

    return 1;
}
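The per-coordinate atomics above serialize heavily when numCoords is small. A sketch of a common alternative follows: per-thread partial sums merged once per pass. It assumes the find_nearest_cluster helper from the file above, and the buffer names newSums/newSize are illustrative (assumed zeroed by the caller).

#include <stdlib.h>
#include <omp.h>

/* one assignment pass over the objects; returns delta */
static float assign_step_sketch(float **objects, int numObjs, int numCoords,
                                int numClusters, float **clusters,
                                int *membership, float *newSums, int *newSize)
{
    float delta = 0.0f;
    #pragma omp parallel
    {
        /* thread-local accumulators, merged under one critical section */
        float *sums = calloc((size_t)numClusters * numCoords, sizeof(float));
        int   *size = calloc(numClusters, sizeof(int));

        #pragma omp for reduction(+:delta) schedule(static)
        for (int i = 0; i < numObjs; i++) {
            int idx = find_nearest_cluster(numClusters, numCoords,
                                           objects[i], clusters);
            if (membership[i] != idx) delta += 1.0f;
            membership[i] = idx;
            size[idx]++;
            for (int j = 0; j < numCoords; j++)
                sums[idx * numCoords + j] += objects[i][j];
        }

        #pragma omp critical            /* merge this thread's partials */
        {
            for (int k = 0; k < numClusters; k++) {
                newSize[k] += size[k];
                for (int j = 0; j < numCoords; j++)
                    newSums[k * numCoords + j] += sums[k * numCoords + j];
            }
        }
        free(sums);
        free(size);
    }
    return delta;
}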
core_zsyssq.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
__attribute__((weak))
void plasma_core_zsyssq(plasma_enum_t uplo,
                        int n,
                        const plasma_complex64_t *A, int lda,
                        double *scale, double *sumsq)
{
    int ione = 1;
    if (uplo == PlasmaUpper) {
        for (int j = 1; j < n; j++)
            // TODO: Inline this operation.
            LAPACK_zlassq(&j, &A[lda*j], &ione, scale, sumsq);
    }
    else { // PlasmaLower
        for (int j = 0; j < n-1; j++) {
            int len = n-j-1;
            // TODO: Inline this operation.
            LAPACK_zlassq(&len, &A[lda*j+j+1], &ione, scale, sumsq);
        }
    }
    *sumsq *= 2.0;
    for (int i = 0; i < n; i++) {
        // diagonal is complex, don't ignore complex part
        double absa = cabs(A[lda*i+i]);
        if (absa != 0.0) { // != propagates nan
            if (*scale < absa) {
                *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa));
                *scale = absa;
            }
            else {
                *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale)));
            }
        }
    }
}

/******************************************************************************/
void plasma_core_omp_zsyssq(plasma_enum_t uplo,
                            int n,
                            const plasma_complex64_t *A, int lda,
                            double *scale, double *sumsq,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            plasma_core_zsyssq(uplo, n, A, lda, scale, sumsq);
        }
    }
}

/******************************************************************************/
void plasma_core_omp_zsyssq_aux(int m, int n,
                                const double *scale, const double *sumsq,
                                double *value,
                                plasma_sequence_t *sequence,
                                plasma_request_t *request)
{
    #pragma omp task depend(in:scale[0:n]) \
                     depend(in:sumsq[0:n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess) {
            double scl = 0.0;
            double sum = 1.0;
            for (int j = 0; j < n; j++) {
                for (int i = j+1; i < n; i++) {
                    int idx = m*j+i;
                    if (scl < scale[idx]) {
                        sum = sumsq[idx] +
                              sum*((scl/scale[idx])*(scl/scale[idx]));
                        scl = scale[idx];
                    }
                    else if (scl > 0.) {
                        sum = sum +
                              sumsq[idx]*((scale[idx]/scl)*(scale[idx]/scl));
                    }
                }
            }
            sum = 2.0*sum;
            for (int j = 0; j < n; j++) {
                int idx = m*j+j;
                if (scl < scale[idx]) {
                    sum = sumsq[idx] + sum*((scl/scale[idx])*(scl/scale[idx]));
                    scl = scale[idx];
                }
                else if (scl > 0.) {
                    sum = sum + sumsq[idx]*((scale[idx]/scl)*(scale[idx]/scl));
                }
            }
            *value = scl*sqrt(sum);
        }
    }
}
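Both routines above manipulate (scale, sumsq) pairs so that the norm scale*sqrt(sumsq) is accumulated without ever squaring a large magnitude. The combine rule they repeat inline can be written as a small helper; the name is illustrative, not part of PLASMA's API.

#include <math.h>

/* merge a second (scale2, sumsq2) pair into (*scl, *sum), preserving the
   invariant that the accumulated norm equals (*scl) * sqrt(*sum) */
static void ssq_combine_sketch(double *scl, double *sum,
                               double scale2, double sumsq2)
{
    if (*scl < scale2) {
        /* rescale the running sum to the larger scale */
        *sum = sumsq2 + (*sum) * ((*scl / scale2) * (*scl / scale2));
        *scl = scale2;
    }
    else if (*scl > 0.0) {
        *sum = *sum + sumsq2 * ((scale2 / *scl) * (scale2 / *scl));
    }
}
/* the loops in plasma_core_omp_zsyssq_aux apply exactly this rule per tile,
   then finish with *value = scl * sqrt(sum) */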
appTwoFor_omp-lols.c
/* -- Libraries for C/C++ -- */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <iostream> using namespace std; /* -- library for parallelization of code -- */ // #include <pthread.h> #ifdef MPI #include <mpi.h> #endif #ifdef _OPENMP #include <omp.h> #endif /* variables to support parallelization of code */ #define DEFAULT_NUMTHREADS 16 // default constant and set value for the number of threads int numThreads; /* -- Application specific #defines and variables -- */ int numOuterIters; int probSize; #define DEFAULT_NUMOUTERITERS 1 #define DEFAULT_PROBSIZE 500 // Default values based on architecture. For a proper test, the problem size ought to be such that data goes out of cache. #define MAX_NUMITERS 100000000 #define MAX_PROBSIZE 16384 // Default values based on architecture. For a proper test, the problem size ought to be such that data goes out of cache. /* -- Debugging -- */ #define VERBOSE 1 /* -- Performance Measurement -- */ double totalTime = 0.0; FILE* myfile;// output file for experimental data /* -- Hardware Profiling -- */ // #define USE_PAPI #ifdef USE_PAPI #include <papi.h> // Leave PAPI out for now to make fully portable. Some platforms don't have the L2 and L3 cache misses available. TODO: need a way to check the counters in config or programmatically. #endif /* --Library for scheduling strategy and variables and macros associated with the library -- */ #include "vSched.h" // in the below macros, strat is how we specify the library #define FORALL_BEGIN(strat, s,e, start, end, tid, numThds ) loop_start_ ## strat (s,e ,&start, &end, tid, numThds); do { #define FORALL_END(strat, start, end, tid) } while( loop_next_ ## strat (&start, &end, tid)); int main (int argc, char** argv ); int i4_min ( int i1, int i2 ); void timestamp ( ); /******************************************************************************/ int main (int argc, char** argv ) /******************************************************************************/ /* Purpose MAIN is the main program for MANDELBROT_OPENMP. Discussion: MANDELBROT_OPENMP computes an image of the Mandelbrot set. Licensing: This code is distributed under the GNU LGPL license. Original Modified: 03 September 2012 Revision Modified: 19 April 2020 Author: John Burkardt Revision Author: Vivek Kale Purpose: The revision author uses the original code to demonstrate and test the use low-overhead loop scheduling strategy through library vSched Local Parameters: Local, int COUNT_MAX, the maximum number of iterations taken for a particular pixel. 
*/ { int m = 500; int n = 500; int b[m][n]; int c; int count[m][n]; int count_max = 2000; int g[m][n]; int i; int j; int jhi; int jlo; int k; char const *output_filename = "mandelbrot_openmp.ppm"; // use const to allow for c++ string conversion FILE *output_unit; int r[m][n]; double wtime; double x_max = 1.25; double x_min = - 2.25; double x; double x1; double x2; double y_max = 1.75; double y_min = - 1.75; double y; double y1; double y2; #ifdef MPI MPI_Init(&argc, &argv); #endif //for vSched int numThreads = DEFAULT_NUMTHREADS; int threadNum; double static_fraction = 0.5; double constraint = 0.1; int chunk_size = 1; if(argc <= 2) // if user fails to put in minimum args, which are for application domain specific for this test { // char userReplyDefault; cout << "Usage: testAppTwo_omp_{loopSched} [probSize] [numOuterIters] [chunk_size] <static_fraction> <constraint>" << endl; cout << "Usage(cont'd): where {loopSched} is the implementation strategy or library you use, e.g., vSched's low-overhead scheduling, OpenMP rtl's low-overhead scheduling" << endl; // cout << "Use defaults? [y/N]" << endl ; // cin << varName; cout << "continuing with default problem size and app parameters." << endl; probSize = DEFAULT_PROBSIZE; numOuterIters = DEFAULT_NUMOUTERITERS; } // else // required args else { probSize = atoi(argv[1]); numOuterIters = atoi(argv[2]); } if (argc > 3) chunk_size = atoi(argv[3]); if (argc > 4) static_fraction = atof(argv[4]); if (argc > 5) constraint = atof(argv[5]); // set number of threads to input //omp_set_num_threads(numThreads); #pragma omp parallel { #pragma omp master cout << "Number of threads is : " << omp_get_num_threads() << endl; #ifdef USE_VSCHED numThreads = omp_get_num_threads(); #endif } #ifdef USE_VSCHED vSched_init(numThreads); #endif timestamp ( ); printf ( "\n" ); printf ( "MANDELBROT_OPENMP\n" ); printf ( " C/OpenMP version\n" ); printf ( "\n" ); printf ( " Create an ASCII PPM image of the Mandelbrot set.\n" ); printf ( "\n" ); printf ( " For each point C = X + i*Y\n" ); printf ( " with X range [%g,%g]\n", x_min, x_max ); printf ( " and Y range [%g,%g]\n", y_min, y_max ); printf ( " carry out %d iterations of the map\n", count_max ); printf ( " Z(n+1) = Z(n)^2 + C.\n" ); printf ( " If the iterates stay bounded (norm less than 2)\n" ); printf ( " then C is taken to be a member of the set.\n" ); printf ( "\n" ); printf ( " An ASCII PPM image of the set is created using\n" ); printf ( " M = %d pixels in the X direction and\n", m ); printf ( " N = %d pixels in the Y direction.\n", n ); omp_sched_t schedule; int chunksize = chunk_size; omp_get_schedule(&schedule, &chunksize); printf ( "OpenMP schedule: OMP_SCHEDULE=%d\t%d\n", (int) schedule, chunksize); wtime = omp_get_wtime ( ); /* Carry out the iteration for each pixel, determining COUNT. */ for (int iter = 0; iter < numOuterIters; iter++) { # pragma omp parallel \ shared ( b, count, count_max, g, r, x_max, x_min, y_max, y_min ) \ private ( i, j, k, x, x1, x2, y, y1, y2 ) { // # pragma omp for //TODO: figure out how to better template the scheduling strategy name in the below lines of code for both library implmentations // # pragma omp for schedule(user:statdynstaggered, &lr) collapse(2) // prototype UDS placeholder // The first parameter is the loop scheduling strategy. 
      /* take per-thread copies: writing the shared threadNum from every
         thread would be a data race */
      int threadNum = omp_get_thread_num();
      int numThreads = omp_get_num_threads();
#ifdef USE_VSCHED
      int startInd;
      int endInd; // chunk start and end indices for vSched runtime to set and retrieve
      setCDY(static_fraction, constraint, chunk_size); // set parameters of scheduling strategy for vSched
      probSize = m; // set to loop bound of loop that gets parallelized by OpenMP
      FORALL_BEGIN(statdynstaggered, 0, probSize, startInd, endInd, threadNum, numThreads)
#ifdef VERBOSE
      if(VERBOSE==1) printf("Thread [%d] : iter = %d \t startInd = %d \t endInd = %d \t\n", threadNum, iter, startInd, endInd);
#endif
      for (i = startInd ; i < endInd ; i++)
#else
#ifdef VERBOSE
      if(VERBOSE==1) printf("Thread [%d] : iter = %d executing a chunk \n", threadNum, iter);
#endif
      /* the omp for directive must bind directly to the loop (no extra
         braces), unlike the FORALL_BEGIN/FORALL_END macro pair, which
         supplies its own do/while around the body */
#pragma omp for schedule(guided, chunk_size)
      for ( i = 0; i < m; i++ )
#endif
      {
        y = ( ( double ) ( i - 1 ) * y_max
            + ( double ) ( m - i ) * y_min )
            / ( double ) ( m - 1 );

        for ( j = 0; j < n; j++ )
        {
          x = ( ( double ) ( j - 1 ) * x_max
              + ( double ) ( n - j ) * x_min )
              / ( double ) ( n - 1 );

          count[i][j] = 0;

          x1 = x;
          y1 = y;

          for ( k = 1; k <= count_max; k++ )
          {
            x2 = x1 * x1 - y1 * y1 + x;
            y2 = 2 * x1 * y1 + y;

            if ( x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 )
            {
              count[i][j] = k;
              break;
            }
            x1 = x2;
            y1 = y2;
          }

          if ( ( count[i][j] % 2 ) == 1 )
          {
            r[i][j] = 255;
            g[i][j] = 255;
            b[i][j] = 255;
          }
          else
          {
            c = ( int ) ( 255.0 * sqrt ( sqrt ( sqrt (
              ( ( double ) ( count[i][j] ) / ( double ) ( count_max ) ) ) ) ) );
            r[i][j] = 3 * c / 5;
            g[i][j] = 3 * c / 5;
            b[i][j] = c;
          }
        }
      }
#ifdef USE_VSCHED
      FORALL_END(statdynstaggered, startInd, endInd, threadNum)
#endif
    } // end parallel
  } // end outer iter loop

  wtime = omp_get_wtime ( ) - wtime;
  printf ( "\n" );
  printf ( "  Time = %g seconds.\n", wtime );
/*
  Write data to an ASCII PPM file.
*/
  output_unit = fopen ( output_filename, "wt" );

  fprintf ( output_unit, "P3\n" );
  fprintf ( output_unit, "%d  %d\n", n, m );
  fprintf ( output_unit, "%d\n", 255 );
  for ( i = 0; i < m; i++ )
  {
    for ( jlo = 0; jlo < n; jlo = jlo + 4 )
    {
      jhi = i4_min ( jlo + 4, n );
      for ( j = jlo; j < jhi; j++ )
      {
        fprintf ( output_unit, "  %d  %d  %d", r[i][j], g[i][j], b[i][j] );
      }
      fprintf ( output_unit, "\n" );
    }
  }

  fclose ( output_unit );
  printf ( "\n" );
  printf ( "  Graphics data written to \"%s\".\n", output_filename );
/*
  Terminate.
*/
  printf ( "\n" );
  printf ( "MANDELBROT_OPENMP\n" );
  printf ( "  Normal end of execution.\n" );
  printf ( "\n" );
  timestamp ( );

#ifdef USE_VSCHED
  vSched_finalize(numThreads);
#endif

#ifdef MPI
  MPI_Finalize();
#endif

  return 0;
}
/******************************************************************************/

int i4_min ( int i1, int i2 )

/******************************************************************************/
/*
  Purpose:

    I4_MIN returns the smaller of two I4's.

  Licensing:

    This code is distributed under the GNU LGPL license.

  Modified:

    29 August 2006

  Author:

    John Burkardt

  Parameters:

    Input, int I1, I2, two integers to be compared.

    Output, int I4_MIN, the smaller of I1 and I2.
*/
{
  int value;

  if ( i1 < i2 )
  {
    value = i1;
  }
  else
  {
    value = i2;
  }
  return value;
}
/******************************************************************************/

void timestamp ( )

/******************************************************************************/
/*
  Purpose:

    TIMESTAMP prints the current YMDHMS date as a time stamp.

  Example:

    31 May 2001 09:45:54 AM

  Modified:

    24 September 2003

  Author:

    John Burkardt

  Parameters:

    None
*/
{
# define TIME_SIZE 40

  static char time_buffer[TIME_SIZE];
  const struct tm *tm;
  time_t now;

  now = time ( NULL );
  tm = localtime ( &now );

  strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm );

  printf ( "%s\n", time_buffer );

  return;
# undef TIME_SIZE
}
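The FORALL_BEGIN/FORALL_END macros above only assume two entry points per scheduling strategy: one call that hands a thread its first chunk, and one that claims follow-up chunks until the range is exhausted. Below is a minimal sketch of that interface, with a static fraction handed out up front and the remainder served as dynamic chunks from a shared counter. This illustrates the calling convention only; it is not vSched's actual statdynstaggered implementation, and all names are illustrative.

#include <stdbool.h>

static int sketch_next;          /* first unclaimed dynamic index */
static int sketch_end;           /* end of the full range         */
static int sketch_chunk = 16;    /* dynamic chunk size            */

/* hand thread tid its static share of [s, e); called by every thread */
void loop_start_sketch(int s, int e, int *start, int *end,
                       int tid, int numThds)
{
    int n = e - s;
    int stat = n / 2;                           /* 50% static fraction */
    int per  = (stat + numThds - 1) / numThds;
    #pragma omp barrier                         /* no stragglers from a previous loop */
    #pragma omp single
    {
        sketch_next = s + stat;
        sketch_end  = e;
    }                                           /* implicit barrier after single */
    *start = s + tid * per;
    *end   = *start + per;
    if (*end > s + stat) *end = s + stat;
    if (*start > *end)   *start = *end;
}

/* claim the next dynamic chunk; false ends the do/while in FORALL_END */
bool loop_next_sketch(int *start, int *end, int tid)
{
    (void)tid;
    int next;
    #pragma omp atomic capture
    { next = sketch_next; sketch_next += sketch_chunk; }
    if (next >= sketch_end)
        return false;
    *start = next;
    *end   = (next + sketch_chunk < sketch_end) ? next + sketch_chunk
                                                : sketch_end;
    return true;
}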
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;

    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. 
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
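// Illustrative caller-side sketch (assumed usage, not a verbatim excerpt
// from the parser): delayed diagnostics are collected into a caller-owned
// pool and later emitted or dropped once the surrounding declaration is
// known:
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   Sema::ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse a declaration; deprecation/access diagnostics accumulate
//   // in 'Pool' instead of being emitted immediately ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);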
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before they are declared; these are rare, and may alias /// another identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before they are declared.
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// Will hold the 'respondsToSelector:' selector. Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields.
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
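// Illustrative mapping from source constructs to the evaluation contexts
// above (a sketch, not normative):
//
//   sizeof(expr)             // 'expr' is an Unevaluated operand
//   decltype(expr)           // Unevaluated; calls made inside 'expr' are
//                            // recorded in DelayedDecltypeCalls below
//   static_assert(cond, "")  // 'cond' is ConstantEvaluated
//   f(args)                  // PotentiallyEvaluated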
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the start locations of unparsed default arguments. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations.
We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway.
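// For example (an illustrative note, using the Diag() member declared just
// below): the copy typically happens when the builder is returned by value,
// as in
//
//   SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
//     DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
//     return SemaDiagnosticBuilder(DB, *this, DiagID); // "copy" acts as move
//   }
//
// after which callers stream arguments as usual (SomeDiagID and the streamed
// operands are placeholders):
//
//   SemaRef.Diag(Loc, SomeDiagID) << SomeType << SomeSourceRange;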
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
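// A sketch of the pattern declared below (illustrative, not additional API):
// std::unique_ptr with a stateful deleter lets PopFunctionScopeInfo hand back
// an owning pointer whose cleanup is routed through Sema rather than a plain
// 'delete':
//
//   Sema::PoppedFunctionScopePtr Scope = S.PopFunctionScopeInfo();
//   // ... inspect *Scope briefly; PoppedFunctionScopeDeleter reclaims it
//   // when 'Scope' goes out of scope ...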
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. 
/// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
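// Illustrative call of BuildFunctionType described above (a sketch; assumes
// 'S' is a Sema instance and 'Loc' a valid location): building the type
// 'int (float, double)' from freshly instantiated pieces:
//
//   QualType Params[] = { S.Context.FloatTy, S.Context.DoubleTy };
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType Fn = S.BuildFunctionType(S.Context.IntTy, Params, Loc,
//                                     DeclarationName(), EPI);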
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. 
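// Illustrative use of the diagnoser machinery above (a sketch; the
// diagnostic ID and arguments are hypothetical): the variadic
// RequireCompleteType overload declared below wraps its trailing arguments
// in a BoundTypeDiagnoser, so a caller can write:
//
//   if (S.RequireCompleteType(Loc, T, SomeDiagID, SomeRange))
//     return true; // diagnostic already emitted, with SomeRange streamed in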
bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. 
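// Illustrative parser-side dispatch on the classification above (a sketch,
// not the parser's actual code; HandleType/HandleExpr are hypothetical):
//
//   Sema::NameClassification NC =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
//                      /*IsAddressOfOperand=*/false);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:         return HandleType(NC.getType());
//   case Sema::NC_Expression:   return HandleExpr(NC.getExpression());
//   case Sema::NC_TypeTemplate: /* use NC.getTemplateName() ... */ break;
//   default: break;
//   }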
enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
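// Illustrative pattern (a sketch; 'compute' is a placeholder) for the map
// below:
//
//   int Total = 0;
//   for (int I = 0; I < N; ++I) {
//     int Total = compute(I); // shadows the outer 'Total'; recorded here
//     Total += I;             // modifying the shadowing variable warns:
//   }                         // did the user mean the outer one?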
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration. bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. 
  /// Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about
  /// emitting code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
  bool canSkipFunctionBody(Decl *D);

  void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
  Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
  Decl *ActOnSkippedFunctionBody(Decl *Decl);
  void ActOnFinishInlineFunctionDef(FunctionDecl *D);

  /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
  /// attribute for which parsing is delayed.
  void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

  /// Diagnose any unused parameters in the given sequence of
  /// ParmVarDecl pointers.
  void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

  /// Diagnose whether the size of parameters or return value of a
  /// function or obj-c method definition is pass-by-value and larger than a
  /// specified threshold.
  void DiagnoseSizeOfParametersAndReturnValue(
      ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D);

  void DiagnoseInvalidJumps(Stmt *Body);
  Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                              SourceLocation RParenLoc);

  /// Handle a C++11 empty-declaration and attribute-declaration.
  Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                              SourceLocation SemiLoc);

  enum class ModuleDeclKind {
    Interface,      ///< 'export module X;'
    Implementation, ///< 'module X;'
  };

  /// The parser has processed a module-declaration that begins the definition
  /// of a module interface or implementation.
  DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                                 SourceLocation ModuleLoc, ModuleDeclKind MDK,
                                 ModuleIdPath Path, bool IsFirstDecl);

  /// The parser has processed a global-module-fragment declaration that begins
  /// the definition of the global module fragment of the current module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);
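  // A rough sketch of how these callbacks line up with a C++20 module unit
  // (the module names 'M' and 'N' are arbitrary examples):
  //
  //   module;             // ActOnGlobalModuleFragmentDecl
  //   #include <cstddef>
  //   export module M;    // ActOnModuleDecl, ModuleDeclKind::Interface
  //   import N;           // ActOnModuleImport (declared below)
  //   module :private;    // ActOnPrivateModuleFragmentDecl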
  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  /// could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation,
                                   RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  /// Common ways to introduce type names without a tag for use in diagnostics.
  /// Keep in sync with err_tag_reference_non_tag.
  enum NonTagKind {
    NTK_NonStruct,
    NTK_NonClass,
    NTK_NonUnion,
    NTK_NonEnum,
    NTK_Typedef,
    NTK_TypeAlias,
    NTK_Template,
    NTK_TypeAliasTemplate,
    NTK_TemplateTemplateArgument,
  };

  /// Given a non-tag type declaration, returns an enum useful for indicating
  /// what kind of non-tag type this is.
  NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                    bool isDefinition, SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag: 'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration: 'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
                 SourceLocation NameLoc, const ParsedAttributesView &Attr,
                 AccessSpecifier AS, SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
                 bool &IsDependent, SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, bool IsTemplateParamOrArg,
                 SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS, IdentifierInfo *Name,
                                SourceLocation NameLoc,
                                const ParsedAttributesView &Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                               const CXXScopeSpec &SS, IdentifierInfo *Name,
                               SourceLocation TagLoc, SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle, AccessSpecifier AS);

  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart, Declarator &D,
                                   Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   const ParsedAttr &MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo, RecordDecl *Record,
                            SourceLocation Loc, bool Mutable,
                            Expr *BitfieldWidth, InClassInitStyle InitStyle,
                            SourceLocation TSSL, AccessSpecifier AS,
                            NamedDecl *PrevDecl, Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

  enum TrivialABIHandling {
    /// The triviality of a method unaffected by "trivial_abi".
    TAH_IgnoreTrivialABI,
    /// The triviality of a method affected by "trivial_abi".
    TAH_ConsiderTrivialABI
  };

  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                  Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);
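  // For illustration of TrivialABIHandling above (a sketch using the Clang
  // 'trivial_abi' attribute):
  //
  //   struct __attribute__((trivial_abi)) T { ~T(); };
  //
  // Under TAH_ConsiderTrivialABI, T's special members may be considered
  // trivial for ABI purposes despite the user-provided destructor; under
  // TAH_IgnoreTrivialABI the attribute does not affect the answer.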
  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, const ParsedAttributesView &AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  /// Perform ODR-like check for C/ObjC when merging tag types from modules.
  /// Unlike in C++, we actually parse the body and reject with an error in
  /// case of a structural mismatch.
  bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                                SkipBodyInfo &SkipBody);

  typedef void *SkippedDefinitionContext;

  /// Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceRange BraceRange);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc, IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, bool IsFixed,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          const ParsedAttributesView &Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                     Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                     const ParsedAttributesView &Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);
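  // For illustration: EnterDeclaratorContext/ExitDeclaratorContext bracket
  // the parts of an out-of-line definition in which unqualified names are
  // looked up in the nested-name-specifier's context (standard C++):
  //
  //   struct S { static int x; static int y; };
  //   int S::x = y;  // 'y' here resolves to S::y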
  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope *S, Decl *D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
  /// true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  /// enclosing namespace set of the context, rather than contained
  /// directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,
    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,
    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,
    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,
    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,
    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };
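  // For illustration: lower values win when availability attributes are
  // merged, so an attribute written directly on the declaration
  // (AP_Explicit, 0) takes precedence over one injected for the same platform
  // via '#pragma clang attribute' (AP_PragmaClangAttribute, 1), which in turn
  // beats one inferred from another platform
  // (AP_InferredFromOtherPlatform, 2). For example, an explicit
  //
  //   void f() __attribute__((availability(macos, introduced=10.12)));
  //
  // would not be displaced by a pragma-applied availability attribute for
  // macOS.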
  /// Attribute merging methods. Return the merged attribute, or nullptr if
  /// no new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(
      NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
      VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
      bool IsUnavailable, StringRef Message, bool IsStrict,
      StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
      unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *mergeTypeVisibilityAttr(
      Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis,
      unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                          unsigned AttrSpellingListIndex, StringRef Uuid);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  NoSpeculativeLoadHardeningAttr *mergeNoSpeculativeLoadHardeningAttr(
      Decl *D, const NoSpeculativeLoadHardeningAttr &AL);
  SpeculativeLoadHardeningAttr *mergeSpeculativeLoadHardeningAttr(
      Decl *D, const SpeculativeLoadHardeningAttr &AL);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                                const InternalLinkageAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
  void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation.
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,
    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,
    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };

  OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                             const LookupResult &OldDecls, NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                  bool ConsiderCudaAttrs = true);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions, bool AllowExplicit,
                        bool InOverloadResolution, bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution, QualType &ConvertedType,
                           bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType &ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType,
                                  QualType ToType);
  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                              CXXCastPath &BasePath, bool IgnoreBaseAccess,
                              bool Diagnose = true);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType, bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind, CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsFunctionConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType, Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);
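  // For illustration: PerformCopyInitialization models the
  // copy-initialization contexts of C++ (a sketch in standard C++):
  //
  //   struct W { W(int); };
  //   W w = 42;  // copy-initialization of 'w' from '42'
  //   void g(W);
  //   g(7);      // copy-initialization of the parameter from '7'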
  /// Check that the lifetime of the initializer (and its subobjects) is
  /// sufficient for initializing the entity, and perform lifetime extension
  /// (when permitted) if not.
  void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
    CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
    CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value, CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);

  /// Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
  class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

    /// Emits a diagnostic when the expression has incomplete class type.
    virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S,
                                                     SourceLocation Loc,
                                                     QualType T) = 0;

    /// Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S,
                                                       SourceLocation Loc,
                                                       QualType T,
                                                       QualType ConvTy) = 0;

    /// Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S,
                                                   CXXConversionDecl *Conv,
                                                   QualType ConvTy) = 0;

    /// Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S,
                                                    SourceLocation Loc,
                                                    QualType T) = 0;

    /// Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S,
                                                CXXConversionDecl *Conv,
                                                QualType ConvTy) = 0;

    /// Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(Sema &S,
                                                     SourceLocation Loc,
                                                     QualType T,
                                                     QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                        bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                          QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                                 QualType T) = 0;
  };
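  // For illustration: the converter machinery above drives contextual
  // implicit conversions such as a switch condition of class type, which
  // must convert to an integral or enumeration type through a single
  // conversion function (standard C++):
  //
  //   struct S { operator int() const; };
  //   void f(S s) {
  //     switch (s) {  // 's' is converted via S::operator int()
  //     default: break;
  //     }
  //   }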
  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false,
                             bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
      bool PartialOverloading = false, bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
  bool CheckNonDependentConversions(
      FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
      ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
      ConversionSequenceList &Conversions, bool SuppressUserConversions,
      CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
      Expr::Classification ObjectClassification = {});
  void AddConversionCandidate(
      CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
      OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
      bool AllowExplicit, bool AllowResultConversion = true);
  void AddTemplateConversionCandidate(
      FunctionTemplateDecl *FunctionTemplate,
      DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From,
      QualType ToType, OverloadCandidateSet &CandidateSet,
      bool AllowObjCConversionOnExplicit, bool AllowExplicit,
      bool AllowResultConversion = true);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto, Expr *Object,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet);
  void AddArgumentDependentLookupCandidates(
      DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args,
      TemplateArgumentListInfo *ExplicitTemplateArgs,
      OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate.
  void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                             QualType DestType = QualType(),
                             bool TakingAddress = false);

  // Emit as a series of 'note's all templates and non-templates identified by
  // the given expression.
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  /// Find the failed Boolean condition within a given Boolean
  /// constant expression, and describe it with a string.
  std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// non-ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-dependent diagnose_if attributes should be checked each time a
  /// function is used as a direct callee of a function call.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                           const Expr *ThisArg,
                                           ArrayRef<const Expr *> Args,
                                           SourceLocation Loc);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-independent diagnose_if attributes should be checked on every
  /// use of a function.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                             SourceLocation Loc);

  /// Returns whether the given function's address can be taken or not,
  /// optionally emitting a diagnostic if the address can't be taken.
  ///
  /// Returns false if taking the address of the function is illegal.
  bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                         bool Complain = false,
                                         SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->   [Return]
  // NonFunctionType         -->   NonFunctionType
  // R (A)                   -->   R(A)
  // R (*)(A)                -->   R (A)
  // R (&)(A)                -->   R (A)
  // R (S::*)(A)             -->   R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                     bool Complain, DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                              DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
      bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection = true,
                                     bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns, Expr *input,
                                     bool RequiresADL = true);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc, Expr *Base,
                                                Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);
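  // For illustration: CreateOverloadedBinOp builds the candidate set and
  // resolves a use of an overloaded operator such as the 'operator+' below
  // (standard C++):
  //
  //   struct P {};
  //   P operator+(P, P);
  //   P r = P() + P();  // resolved to the user-declared operator+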
  /// Helpers for dealing with blocks and functions.
  bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                bool CheckParameterNames);
  void CheckCXXDefaultArguments(FunctionDecl *FD);
  void CheckExtraCXXDefaultArguments(Declarator &D);
  Scope *getNonFieldDeclScope(Scope *S);

  /// \name Name lookup
  ///
  /// These routines provide name lookup that is used during semantic
  /// analysis to resolve the various kinds of names (identifiers,
  /// overloaded operator names, constructor names, etc.) into zero or
  /// more declarations within a particular scope. The major entry
  /// points are LookupName, which performs unqualified name lookup,
  /// and LookupQualifiedName, which performs qualified name lookup.
  ///
  /// All name lookup is performed based on some specific criteria,
  /// which specify what names will be visible to name lookup and how
  /// far name lookup should work. These criteria are important both
  /// for capturing language semantics (certain lookups will ignore
  /// certain names, for example) and for performance, since name
  /// lookup is often a bottleneck in the compilation of C++. Name
  /// lookup criteria are specified via the LookupCriteria enumeration.
  ///
  /// The results of name lookup can vary based on the kind of name
  /// lookup performed, the current language, and the translation
  /// unit. In C, for example, name lookup will either return nothing
  /// (no entity found) or a single declaration. In C++, name lookup
  /// can additionally refer to a set of overloaded functions or
  /// result in an ambiguity. All of the possible results of name
  /// lookup are captured by the LookupResult class, which provides
  /// the ability to distinguish among them.
  //@{

  /// Describes the kind of name lookup to perform.
  enum LookupNameKind {
    /// Ordinary name lookup, which finds ordinary names (functions,
    /// variables, typedefs, etc.) in C and most kinds of names
    /// (functions, variables, members, types, etc.) in C++.
    LookupOrdinaryName = 0,
    /// Tag name lookup, which finds the names of enums, classes,
    /// structs, and unions.
    LookupTagName,
    /// Label name lookup.
    LookupLabel,
    /// Member name lookup, which finds the names of
    /// class/struct/union members.
    LookupMemberName,
    /// Look up of an operator name (e.g., operator+) for use with
    /// operator overloading. This lookup is similar to ordinary name
    /// lookup, but will ignore any declarations that are class members.
    LookupOperatorName,
    /// Look up of a name that precedes the '::' scope resolution
    /// operator in C++. This lookup completely ignores operator, object,
    /// function, and enumerator names (C++ [basic.lookup.qual]p1).
    LookupNestedNameSpecifierName,
    /// Look up a namespace name within a C++ using directive or
    /// namespace alias definition, ignoring non-namespace names (C++
    /// [basic.lookup.udir]p1).
    LookupNamespaceName,
    /// Look up all declarations in a scope with the given name,
    /// including resolved using declarations. This is appropriate
    /// for checking redeclarations for a using declaration.
    LookupUsingDeclName,
    /// Look up an ordinary name that is going to be redeclared as a
    /// name with linkage. This lookup ignores any declarations that
    /// are outside of the current scope unless they have linkage. See
    /// C99 6.2.2p4-5 and C++ [basic.link]p6.
    LookupRedeclarationWithLinkage,
    /// Look up a friend of a local class. This lookup does not look
    /// outside the innermost non-class scope. See C++11 [class.friend]p11.
    LookupLocalFriendName,
    /// Look up the name of an Objective-C protocol.
    LookupObjCProtocolName,
    /// Look up implicit 'self' parameter of an objective-c method.
    LookupObjCImplicitSelfParam,
    /// Look up the name of an OpenMP user-defined reduction operation.
    LookupOMPReductionName,
    /// Look up the name of an OpenMP user-defined mapper.
    LookupOMPMapperName,
    /// Look up any declaration with any name.
    LookupAnyName
  };
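  // For illustration: tag names and ordinary names live in different name
  // spaces, which is why the lookup kind matters (standard C and C++):
  //
  //   struct S { int i; };  // found by LookupTagName
  //   int S = 0;            // found by LookupOrdinaryName; hides the tag
  //   struct S *p;          // elaborated type specifier uses tag lookup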
  /// Specifies whether (or how) name lookup is being performed for a
  /// redeclaration (vs. a reference).
  enum RedeclarationKind {
    /// The lookup is a reference to this name that is not for the
    /// purpose of redeclaring the name.
    NotForRedeclaration = 0,
    /// The lookup results will be used for redeclaration of a name,
    /// if an entity by that name already exists and is visible.
    ForVisibleRedeclaration,
    /// The lookup results will be used for redeclaration of a name
    /// with external linkage; non-visible lookup results with external linkage
    /// may also be found.
    ForExternalRedeclaration
  };

  RedeclarationKind forRedeclarationInCurContext() {
    // A declaration with an owning module for linkage can never link against
    // anything that is not visible. We don't need to check linkage here; if
    // the context has internal linkage, redeclaration lookup won't find things
    // from other TUs, and we can't safely compute linkage yet in general.
    if (cast<Decl>(CurContext)
            ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
      return ForVisibleRedeclaration;
    return ForExternalRedeclaration;
  }

  /// The possible outcomes of name lookup for a literal operator.
  enum LiteralOperatorLookupResult {
    /// The lookup resulted in an error.
    LOLR_Error,
    /// The lookup found no match but no diagnostic was issued.
    LOLR_ErrorNoDiagnostic,
    /// The lookup found a single 'cooked' literal operator, which
    /// expects a normal literal to be built and passed to it.
    LOLR_Cooked,
    /// The lookup found a single 'raw' literal operator, which expects
    /// a string literal containing the spelling of the literal token.
    LOLR_Raw,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the characters of the spelling of the literal token to be
    /// passed as a non-type template argument pack.
    LOLR_Template,
    /// The lookup found an overload set of literal operator templates,
    /// which expect the character type and characters of the spelling of the
    /// string literal token to be passed as template arguments.
    LOLR_StringTemplate
  };
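  // For illustration, the literal operator forms distinguished by the lookup
  // results above (the suffix names are arbitrary examples):
  //
  //   unsigned long long operator""_a(unsigned long long); // LOLR_Cooked
  //   int operator""_b(const char *);                      // LOLR_Raw
  //   template <char...> int operator""_c();               // LOLR_Template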
  SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                  CXXSpecialMember SM,
                                                  bool ConstArg,
                                                  bool VolatileArg,
                                                  bool RValueThis,
                                                  bool ConstThis,
                                                  bool VolatileThis);

  typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
  typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
      TypoRecoveryCallback;

private:
  bool CppLookupName(LookupResult &R, Scope *S);

  struct TypoExprState {
    std::unique_ptr<TypoCorrectionConsumer> Consumer;
    TypoDiagnosticGenerator DiagHandler;
    TypoRecoveryCallback RecoveryHandler;
    TypoExprState();
    TypoExprState(TypoExprState &&other) noexcept;
    TypoExprState &operator=(TypoExprState &&other) noexcept;
  };

  /// The set of unhandled TypoExprs and their associated state.
  llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

  /// Creates a new TypoExpr AST node.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl *, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(
      const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind,
      Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
      DeclContext *MemberContext, bool EnteringContext,
      const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                              SourceLocation Loc, LookupNameKind NameKind,
                              RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S,
                  bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                           CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                        bool AllowBuiltinCreation = false,
                        bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                   RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                    QualType T1, QualType T2,
                                    UnresolvedSetImpl &Functions);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                                 SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                               unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                         bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                              unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                        bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                    ArrayRef<QualType> ArgTys,
                                                    bool AllowRaw,
                                                    bool AllowTemplate,
                                                    bool AllowStringTemplate,
                                                    bool DiagnoseMissing);
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                               ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                          VisibleDeclConsumer &Consumer,
                          bool IncludeGlobalScope = true,
                          bool IncludeDependentBases = false,
                          bool LoadExternal = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };

  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               CorrectionCandidateCallback &CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult
  CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; });

  ExprResult CorrectDelayedTyposInExpr(
      Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult CorrectDelayedTyposInExpr(
      ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }
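  // A minimal usage sketch of the Filter parameter (illustrative only;
  // 'SemaRef' and 'E' stand for a Sema instance and an expression obtained
  // from context): only correction combinations that yield an expression of
  // integral type are accepted, and other combinations keep being tried.
  //
  //   ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(
  //       E, /*InitDecl=*/nullptr, [](Expr *E) -> ExprResult {
  //         return E->getType()->isIntegerType() ? ExprResult(E)
  //                                              : ExprError();
  //       });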
  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

  void FindAssociatedClassesAndNamespaces(
      SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
      AssociatedNamespaceSet &AssociatedNamespaces,
      AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                                 bool ForRedeclaration, SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  // Helper for delayed processing of attributes.
  void ProcessDeclAttributeDelayed(Decl *D,
                                   const ParsedAttributesView &AttrList);
  void ProcessDeclAttributeList(Scope *S, Decl *D,
                                const ParsedAttributesView &AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const ParsedAttributesView &AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                      StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                              SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                   const ParsedAttributesView &Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation's declaration matches exactly that of its interface
  /// declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCContainerDecl *IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property
  /// setter/getter and its property has a backing ivar, returns this ivar;
  /// otherwise, returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(
      const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(
      Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
      FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc,
      Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite,
      unsigned &Attributes, const unsigned AttributesAsWritten, QualType T,
      TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(
      Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
      SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel,
      SourceLocation GetterNameLoc, Selector SetterSel,
      SourceLocation SetterNameLoc, const bool isReadWrite,
      const unsigned Attributes, const unsigned AttributesAsWritten, QualType T,
      TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
      DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                       ObjCInterfaceDecl *IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks whether two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter CheckTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
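/// As a hedged illustration of the intended pattern (the surrounding action
/// and the 'SemaRef' name are hypothetical): the destructor pops the current
/// function scope unless disable() was called first.
/// \code
///   FunctionScopeRAII PopOnError(SemaRef);
///   if (SomethingWentWrong)
///     return StmtError();  // PopOnError's destructor pops the scope
///   PopOnError.disable();  // success path: leave the scope to be popped later
/// \endcode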
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
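/// For example (an illustrative sketch; this is the pattern diagnosed under
/// -Wzero-as-null-pointer-constant):
/// \code
///   int *P = 0;        // warns: zero used as a null pointer constant
///   int *Q = nullptr;  // fine
/// \endcode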
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
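// As a hedged illustration of the odr-use distinction (example ours, not from
// the original source):
//   constexpr int N = 4;
//   int Arr[N];        // reads N's constant value: not an odr-use
//   const int *P = &N; // takes N's address: an odr-use
// Only the second use requires a definition of N to exist.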
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
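/// For example (an illustrative sketch, not from the source): given
/// \code
///   int answer();
///   int X = answer;  // error: initializing 'int' with a function
/// \endcode
/// recovery may offer to rewrite the initializer as the call 'answer()'.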
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
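// For instance (an illustrative sketch): given 'S *P;', the access 'P.Field'
// initially fails; reinvoking with '->' recovers it as 'P->Field', matching
// the fix-it Clang emits for this mistake.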
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
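/// As an illustrative reading of the parameters (our example): for a call
/// written 'f(a, b)', Fn is the callee expression 'f', ArgExprs holds
/// {a, b}, and LParenLoc/RParenLoc locate '(' and ')'.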
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an AltiVec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression.
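// A sketch of the GNU extension being handled (example ours):
//   int X = ({ int Tmp = compute(); Tmp * 2; });
// The trailing expression 'Tmp * 2' supplies the value of the whole
// statement expression and is what ActOnStmtExprResult receives.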
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
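/// For example (a sketch of typical usage; the typedef names are ours):
/// \code
///   typedef float Float4 __attribute__((ext_vector_type(4)));
///   typedef int   Int4   __attribute__((ext_vector_type(4)));
///   Float4 F = {0.5f, 1.5f, 2.5f, 3.5f};
///   Int4   I = __builtin_convertvector(F, Int4);  // element-wise conversion
/// \endcode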
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Look up the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2.
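/// For example (illustrative):
/// \code
///   struct S {
///     S(std::initializer_list<int> IL);  // initializer-list constructor
///     S(int N);                          // not one
///   };
/// \endcode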
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
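/// For example (a sketch of the diagnosed pattern):
/// \code
///   struct S {
///     static auto f() -> decltype(this);  // error: 'this' cannot appear
///                                         // in a static member function
///   };
/// \endcode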
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
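/// For example (illustrative C++17 usage):
/// \code
///   template <typename... Ts>
///   auto sum(Ts... Args) { return (Args + ...); }  // unary right fold
/// \endcode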
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse the 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class), along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// /// \returns true on failure, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
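/// For example (an editorial sketch, not part of the original header): /// \code /// throw std::runtime_error("boom"); // ActOnCXXThrow, then BuildCXXThrow /// throw; // rethrow; the operand is null /// \endcode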
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
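/// For instance (an editorial sketch, not part of the original header; the /// type 'Pooled' is hypothetical): /// \code /// struct Pooled { /// static void *operator new(std::size_t); /// static void operator delete(void *); /// }; /// auto *P = new Pooled; // AFS_Class lookup finds Pooled::operator new /// auto *Q = ::new Pooled; // '::new' restricts lookup to AFS_Global /// \endcode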
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
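/// For example (an editorial sketch, not part of the original header), the /// initializer below materializes a temporary std::string with a non-trivial /// destructor, so the full-expression is wrapped in an ExprWithCleanups: /// \code /// std::string S = std::string("a") + "b"; /// \endcode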
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
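/// For instance (an editorial sketch, not part of the original header; /// 'MakeWidget' is a hypothetical factory function): /// \code /// auto L = [V = MakeWidget()](int X) { return V.Size + X; }; // init-capture 'V' /// \endcode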
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
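/// For example (an editorial sketch, not part of the original header; requires /// the blocks extension): /// \code /// void (^B)(int) = [](int X) { /* ... */ }; // lambda converted to a block /// \endcode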
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed
a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
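/// For instance (an editorial sketch, not part of the original header): /// \code /// struct B { virtual void F() final; }; /// struct D : B { void F() override; }; // error: overriding a 'final' function /// \endcode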
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
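/// For example (an editorial sketch, not part of the original header): /// \code /// template<typename T> struct Box { Box(T); }; /// Box(const char *) -> Box<std::string>; // 'Box' here names a deduction guide /// \endcode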
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
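/// For instance (an editorial sketch, not part of the original header): /// \code /// template<typename T> void F() { /// class T::template apply<int> *P; // elaborated-type-specifier with a template-id /// } /// \endcode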
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
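/// For example (an editorial sketch, not part of the original header): /// \code /// template<typename MetaFun, typename T1, typename T2> /// void G(typename MetaFun::template apply<T1, T2>); // ends in a template-id /// \endcode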
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
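///
/// For illustration, a pack can occur inside a nested-name-specifier:
/// \code
/// template<typename ...Ts> struct A {
///   typename Ts::type member; // 'Ts' is an unexpanded pack inside 'Ts::'
/// };
/// \endcode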
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// to determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure.
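///
/// For illustration, a call that produces inconsistent deductions:
/// \code
/// template<typename T> void f(T, T);
/// f(1, 2.0); // T deduced as both 'int' and 'double': TDK_Inconsistent
/// \endcode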
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// A function argument from which we performed template argument /// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
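///
/// For illustration, class template argument deduction relies on such guides:
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2); // the implicit guide from Pair(T, T) deduces Pair<int>
/// \endcode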
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing. enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArgs provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
/// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// These callbacks are used to print, trace, or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information.
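///
/// A hypothetical usage sketch ('SemaRef' and 'NumExpansions' are
/// illustrative names, not part of this interface):
/// \code
/// for (unsigned I = 0; I != NumExpansions; ++I) {
///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // ... substitute the pattern using the I-th element of each pack ...
/// }
/// \endcode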
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionIndexRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() will return true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
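///
/// A hypothetical caller-side sketch:
/// \code
/// if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///   // Substitution failures here are SFINAE failures, not hard errors.
/// }
/// \endcode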
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index. /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up.
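///
/// A hypothetical usage sketch ('Info0', 'EPI', and 'NumParams' are
/// illustrative names, not part of this interface):
/// \code
/// ExtParameterInfoBuilder Builder;
/// Builder.set(0, Info0); // indices must be set in increasing order
/// EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// // getPointerOrNull() returns null when every recorded info is trivial
/// \endcode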
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
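///
/// For illustration, instantiating 'g' below expands the pack expansion
/// 'ts...' in the argument list into one expression per element of 'Ts':
/// \code
/// template<typename ...Ts> void g(Ts ...ts) { f(ts...); }
/// \endcode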
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo
*ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyPtrTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
  enum ResultTypeCompatibilityKind {
    RTC_Compatible,
    RTC_Incompatible,
    RTC_Unknown
  };

  void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);

  enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
  };

  /// ActOnPragmaClangSection - Called on well formed \#pragma clang section
  void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                               PragmaClangSectionAction Action,
                               PragmaClangSectionKind SecKind,
                               StringRef SecName);

  /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                       StringRef SlotLabel, Expr *Alignment);

  enum class PragmaPackDiagnoseKind {
    NonDefaultStateAtInclude,
    ChangedStateAtExit
  };

  void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                    SourceLocation IncludeLoc);
  void DiagnoseUnterminatedPragmaPack();

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
                            StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(
      LangOptions::PragmaMSPointersToMembersKind Kind,
      SourceLocation PragmaLoc);

  /// Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                             SourceLocation PragmaLoc,
                             MSVtorDispAttr::Mode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  bool UnifySection(StringRef SectionName, int SectionFlags,
                    DeclaratorDecl *TheDecl);
  bool UnifySection(StringRef SectionName, int SectionFlags,
                    SourceLocation PragmaSectionLocation);

  /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
  void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                        PragmaMsStackAction Action,
                        llvm::StringRef StackSlotLabel,
                        StringLiteral *SegmentName, llvm::StringRef PragmaName);

  /// Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                            StringLiteral *SegmentName);

  /// Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                            StringLiteral *SegmentName);

  /// Called on #pragma clang __debug dump II
  void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

  /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                                 StringRef Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                         SourceLocation PragmaLoc);
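  // Illustrative sketch (hypothetical client code, not part of this
  // interface): the pragmas the handlers above act on, as written in source:
  //
  //   #pragma pack(push, 4)              // ActOnPragmaPack
  //   #pragma options align=mac68k       // ActOnPragmaOptionsAlign (POAK_Mac68k)
  //   #pragma ms_struct on               // ActOnPragmaMSStruct
  //   #pragma comment(lib, "somelib")    // ActOnPragmaMSComment
  //   #pragma detect_mismatch("k", "v")  // ActOnPragmaDetectMismatch
  //   #pragma unused(x)                  // ActOnPragmaUnused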
  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo *VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo *WeakName, SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo *WeakName,
                                  IdentifierInfo *AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo *WeakName, IdentifierInfo *AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

  /// ActOnPragmaFenvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
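  // Illustrative sketch (hypothetical client code, not part of this
  // interface): source spellings reaching the handlers above:
  //
  //   #pragma weak fallback_impl                  // ActOnPragmaWeakID
  //   #pragma weak alias_name = real_name         // ActOnPragmaWeakAlias
  //   #pragma redefine_extname old_nm new_nm      // ActOnPragmaRedefineExtname
  //   #pragma GCC visibility push(hidden)         // ActOnPragmaVisibility
  //   #pragma clang attribute push (__attribute__((noinline)), apply_to = function)
  //   #pragma clang attribute pop                 // ActOnPragmaAttributePop
  //   #pragma clang optimize off                  // ActOnPragmaOptimize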
  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                         unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                   unsigned SpellingListIndex, bool InInstantiation = false);

  void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                           unsigned SpellingListIndex);

  enum class RetainOwnershipKind { NS, CF, OS };
  void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
                                      Expr *Min, Expr *Max,
                                      unsigned SpellingListIndex);
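  // Illustrative sketch (hypothetical declarations, not part of this
  // interface): attribute spellings that reach the Add*Attr helpers above:
  //
  //   struct __attribute__((aligned(16))) Vec4 { float v[4]; }; // AddAlignedAttr
  //   void *my_alloc(unsigned n)
  //       __attribute__((assume_aligned(64)));                  // AddAssumeAlignedAttr
  //   void *my_aligned_alloc(unsigned a, unsigned n)
  //       __attribute__((alloc_align(1)));                      // AddAllocAlignAttr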
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
  void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                               Expr *Max, unsigned SpellingListIndex);

  bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines TS
  //
  bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                               StringRef Keyword);
  ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

  ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      bool IsImplicit = false);
  ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                        UnresolvedLookupExpr *Lookup);
  ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
  StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                               bool IsImplicit = false);
  StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
  bool buildCoroutineParameterMoves(SourceLocation Loc);
  VarDecl *buildCoroutinePromise(SourceLocation Loc);
  void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
  ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                           SourceLocation FuncLoc);

  //===--------------------------------------------------------------------===//
  // OpenCL extensions.
  //
private:
  std::string CurrOpenCLExtension;
  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD associates with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

  /// Check if a function type \p FT associates with any
  /// extensions present in OpenCLTypeExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

  /// Find an extension in an appropriate extension map and return its name
  template<typename T, typename MapT>
  std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

  void setCurrentOpenCLExtension(llvm::StringRef Ext) {
    CurrOpenCLExtension = Ext;
  }

  /// Set OpenCL extensions for a type which can only be used when these
  /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

  /// Set OpenCL extensions for a declaration which can only be
  /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
  /// nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

  /// Set current OpenCL extensions for a type which can only be used
  /// when these OpenCL extensions are enabled. If current OpenCL extension is
  /// empty, do nothing.
  void setCurrentOpenCLExtensionForType(QualType T);

  /// Set current OpenCL extensions for a declaration which
  /// can only be used when these OpenCL extensions are enabled. If current
  /// OpenCL extension is empty, do nothing.
  void setCurrentOpenCLExtensionForDecl(Decl *FD);

  bool isOpenCLDisabledDecl(Decl *FD);
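  // Illustrative sketch (standard OpenCL source, not part of this interface):
  // the extension state tracked above is driven by pragmas such as:
  //
  //   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
  //   double d;   // OK only while cl_khr_fp64 is enabled
  //   #pragma OPENCL EXTENSION cl_khr_fp64 : disable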
  /// Check if type \p T corresponding to declaration specifier \p DS
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

  /// Check if declaration \p D used by expression \p E
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

  //===--------------------------------------------------------------------===//
  // OpenMP directives and clauses.
  //
private:
  void *VarDataSharingAttributesStack;

  /// Number of nested '#pragma omp declare target' directives.
  unsigned DeclareTargetNestingLevel = 0;

  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();

  ExprResult VerifyPositiveIntegerConstantInClause(
      Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
                                 bool CheckForDelayedContext = true);

  /// Check if the expression is allowed to be used in expressions for the
  /// OpenMP devices.
  void checkOpenMPDeviceExpr(const Expr *E);

  /// Checks if a type or a declaration is disabled due to the owning extension
  /// being disabled, and emits diagnostic messages if it is disabled.
  /// \param D type or declaration to be checked.
  /// \param DiagLoc source location for the diagnostic message.
  /// \param DiagInfo information to be emitted for the diagnostic message.
  /// \param SrcRange source range of the declaration.
  /// \param Map maps type or declaration to the extensions.
  /// \param Selector selects diagnostic message: 0 for type and 1 for
  ///        declaration.
  /// \return true if the type or declaration is disabled.
  template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
  bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                     MapT &Map, unsigned Selector = 0,
                                     SourceRange SrcRange = SourceRange());

public:
  /// Function tries to capture lambda's captured variables in the OpenMP
  /// region before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  /// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                             unsigned OpenMPCaptureLevel) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);
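  // Illustrative sketch (hypothetical user code, not part of this interface):
  // the capture queries above classify references inside a region, e.g.:
  //
  //   int x = 0, y = 0;
  //   #pragma omp parallel firstprivate(x) shared(y)
  //   { y += x; }   // 'x': captured into a private copy; 'y': shared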
  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();

  /// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
  /// for \p FD based on DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);

  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);

  /// End analysis of clauses.
  void EndOpenMPClause();

  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);

  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);

  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);

  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);

  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);

  /// Check restrictions on Requires directive
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);

  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);

  /// Initialize declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);

  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
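  // Illustrative sketch (hypothetical user code, not part of this interface):
  // the Start/Combiner/Initializer/End hooks above correspond to the pieces
  // of a user-defined reduction; 'omp_out', 'omp_in' and 'omp_priv' are the
  // special variables defined by OpenMP (one logical line in real source):
  //
  //   #pragma omp declare reduction(maxer : int :
  //       omp_out = omp_out > omp_in ? omp_out : omp_in)
  //       initializer(omp_priv = INT_MIN)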
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);

  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);

  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);

  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);

  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of target region i.e. '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

  /// Called at the end of target region i.e. '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();

  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                    const DeclarationNameInfo &Id,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    NamedDeclSetType &SameDirectiveDecls);

  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());

  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    return DeclareTargetNestingLevel > 0;
  }

  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;

  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  using VarsWithInheritedDSAType =
      llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
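  // Illustrative sketch (hypothetical user code, not part of this interface):
  // ActOnOpenMPParallelDirective receives the clause list and the associated
  // compound statement of, e.g.:
  //
  //   #pragma omp parallel num_threads(4)
  //   { do_work(); }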
  /// Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                           SourceLocation StartLoc, SourceLocation EndLoc,
                           VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                          SourceLocation StartLoc, SourceLocation EndLoc,
                          VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                              SourceLocation StartLoc, SourceLocation EndLoc,
                              VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target enter data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                                 SourceLocation StartLoc,
                                                 SourceLocation EndLoc,
                                                 Stmt *AStmt);

  /// Called on well-formed '\#pragma omp target exit data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc,
                                                Stmt *AStmt);

  /// Called on well-formed '\#pragma omp target parallel' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  /// Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  /// Called on well-formed '\#pragma omp taskloop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target update'.
  StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc,
                                              Stmt *AStmt);

  /// Called on well-formed '\#pragma omp distribute parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc);
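  // Illustrative sketch (hypothetical user code, not part of this interface):
  // the combined constructs above compose as their names suggest, e.g.:
  //
  //   #pragma omp target teams distribute parallel for simd
  //   for (int i = 0; i < n; ++i)
  //     a[i] = b[i] + c[i];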
  /// Called on well-formed '\#pragma omp target teams distribute' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute parallel for
  /// simd' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);

  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);

  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);

  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);

  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);

  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
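  // Illustrative sketch (hypothetical user code, not part of this interface):
  // several of the clauses above attached to directives:
  //
  //   #pragma omp parallel default(none) proc_bind(close) num_threads(8)
  //   {
  //     #pragma omp for schedule(dynamic, 16) nowait
  //     for (int i = 0; i < n; ++i) { /* ... */ }
  //   }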
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);

  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
      bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);

  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'linear' clause.
  OMPClause *ActOnOpenMPLinearClause(
      ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
      SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
      SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc);

  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
                          SourceLocation DepLoc, SourceLocation ColonLoc,
                          ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                       ArrayRef<SourceLocation> MapTypeModifiersLoc,
                       CXXScopeSpec &MapperIdScopeSpec,
                       DeclarationNameInfo &MapperId,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                       ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);

  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId,
                      const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'from' clause.
  OMPClause *ActOnOpenMPFromClause(
      ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
      DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
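  // Illustrative sketch (hypothetical user code, not part of this interface):
  // variable-list clauses above, including array sections in 'map' and the
  // motion clauses 'to'/'from':
  //
  //   #pragma omp target map(tofrom: a[0:n]) depend(in: x) device(0)
  //   { /* ... */ }
  //   #pragma omp target update from(a[0:n])
  //   #pragma omp simd linear(i : 1) aligned(p : 64) reduction(+ : sum)
  //   for (i = 0; i < n; ++i) sum += p[i];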
  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);

  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                   = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
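  // Illustrative sketch (hypothetical expressions, not part of this
  // interface): the conversion kinds and promotions handled above:
  //
  //   double d = 1;                  // CCK_ImplicitConversion (int -> double)
  //   float f = (float)d;            // CCK_CStyleCast
  //   int i = int(d);                // CCK_FunctionalCast
  //   long l = static_cast<long>(d); // CCK_OtherCast
  //   printf("%f", 1.0f);            // DefaultArgumentPromotion: the float
  //                                  // vararg argument is promoted to double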
  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX
    /// protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);
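  // Illustrative sketch (hypothetical assignments, not part of this
  // interface): a few AssignConvertType values and assignments producing them:
  //
  //   int *p;   int i = p;               // PointerToInt
  //   int si;   unsigned *up = &si;      // IncompatiblePointerSign
  //   char *cp; const char **cpp = &cp;  // IncompatibleNestedPointerQualifiers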
AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or an invalid result if there
/// were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Check that the given expression can be
/// converted to bool, returning the converted expression or an invalid
/// result if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

private:
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. 
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
void CheckShadowInheritedFields(const SourceLocation &Loc,
                                DeclarationName FieldName,
                                const CXXRecordDecl *RD,
                                bool DeclIsField = true);

/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers();

/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action);

/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
  ForThisTarget = 0,
  VariadicFunction,
  ConstructorDestructor,
  BuiltinFunction
};
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context,
                         Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
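// Illustrative sketch (not part of Sema.h, following the header's own
// "Example usage" comment convention): how parser-side code typically uses
// the EnterExpressionEvaluationContext RAII helper declared above. 'Actions'
// (the Parser's Sema reference) and 'ParseConstantExpression()' are assumed
// caller-side names, not declarations from this header.
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         Actions, Sema::ExpressionEvaluationContext::Unevaluated);
//     // Parse e.g. the operand of sizeof without odr-using anything;
//     // the constructor pushed the context.
//     ExprResult Operand = ParseConstantExpression();
//     // ...
//   } // destructor pops the context via PopExpressionEvaluationContext()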
GB_unop__sinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__sinh_fp32_fp32)
// op(A') function: GB (_unop_tran__sinh_fp32_fp32)

// C type:  float
// A type:  float
// cast:    float cij = aij
// unaryop: cij = sinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = sinhf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__sinh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__sinh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
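/*
  Illustrative standalone demo (not part of the generated file above): a
  minimal sketch of how this kernel is reached through the public GraphBLAS
  API. It assumes a SuiteSparse:GraphBLAS build in which GxB_SINH_FP32 is
  enabled (i.e. GxB_NO_SINH and GxB_NO_FP32 are not defined), so that
  GrB_Matrix_apply dispatches to GB (_unop_apply__sinh_fp32_fp32).
*/
#include <stdio.h>
#include <GraphBLAS.h>

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A, C ;
    GrB_Matrix_new (&A, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&C, GrB_FP32, 2, 2) ;
    GrB_Matrix_setElement_FP32 (A, 0.5f, 0, 0) ;
    GrB_Matrix_setElement_FP32 (A, -1.0f, 1, 1) ;
    // C = sinh (A), applied entry-wise to the stored entries of A
    GrB_Matrix_apply (C, NULL, NULL, GxB_SINH_FP32, A, NULL) ;
    float c00 = 0 ;
    GrB_Matrix_extractElement_FP32 (&c00, C, 0, 0) ;
    printf ("sinh(0.5) = %g\n", c00) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return 0 ;
}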
poisson.ref.c
#include <sys/time.h> #include <time.h> #include <stdio.h> static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif } # include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> # include <string.h> # include "poisson.h" # include "main.h" # include "timer.h" double r8mat_rms(int nx, int ny, double *a_); void rhs(int nx, int ny, double *f_, int block_size); void timestamp(void); double u_exact(double x, double y); double uxxyy_exact(double x, double y); /* Purpose: MAIN is the main program for POISSON_OPENMP. Discussion: POISSON_OPENMP is a program for solving the Poisson problem. This program uses OpenMP for parallel execution. The Poisson equation - DEL^2 U(X,Y) = F(X,Y) is solved on the unit square [0,1] x [0,1] using a grid of NX by NX evenly spaced points. The first and last points in each direction are boundary points. The boundary conditions and F are set so that the exact solution is U(x,y) = sin ( pi * x * y) so that - DEL^2 U(x,y) = pi^2 * ( x^2 + y^2) * sin ( pi * x * y) The Jacobi iteration is repeatedly applied until convergence is detected. For convenience in writing the discretized equations, we assume that NX = NY. Licensing: This code is distributed under the GNU LGPL license. Modified: 14 December 2011 Author: John Burkardt */ /******************************************************************************/ double run(struct user_parameters* params) { int matrix_size = params->matrix_size; if (matrix_size <= 0) { matrix_size = 512; params->matrix_size = matrix_size; } int block_size = params->blocksize; if (block_size <= 0) { block_size = 128; params->blocksize = block_size; } int niter = params->titer; if (niter <= 0) { niter = 4; params->titer = niter; } double dx; double dy; double error; int ii,i; int jj,j; int nx = matrix_size; int ny = matrix_size; double *f = (double *)malloc(nx * nx * sizeof(double)); double *u = (double *)malloc(nx * nx * sizeof(double)); double *unew = (double *)malloc(nx * ny * sizeof(double)); /* test if valid */ if ( (nx % block_size) || (ny % block_size) ) { params->succeed = 0; params->string2display = "*****ERROR: blocsize must divide NX and NY"; return 0; } /// INITIALISATION dx = 1.0 / (double) (nx - 1); dy = 1.0 / (double) (ny - 1); // Set the right hand side array F. rhs(nx, ny, f, block_size); /* Set the initial solution estimate UNEW. We are "allowed" to pick up the boundary conditions exactly. 
*/ #pragma omp parallel { #pragma omp single for (j = 0; j < ny; j+= block_size) { for (i = 0; i < nx; i+= block_size) { #pragma omp task firstprivate(i,j) private(ii,jj) for (jj=j; jj<j+block_size; ++jj) { for (ii=i; ii<i+block_size; ++ii) { if (ii == 0 || ii == nx - 1 || jj == 0 || jj == ny - 1) { (unew)[ii * ny + jj] = (f)[ii * ny + jj]; } else { (unew)[ii * ny + jj] = 0.0; } } } } } } /// KERNEL INTENSIVE COMPUTATION START_TIMER; sweep(nx, ny, dx, dy, f, 0, niter, u, unew, block_size); END_TIMER; if(params->check) { double x; double y; double *udiff = (double *)malloc(nx * ny * sizeof(double)); /// CHECK OUTPUT // Check for convergence. for (j = 0; j < ny; j++) { y = (double) (j) / (double) (ny - 1); for (i = 0; i < nx; i++) { x = (double) (i) / (double) (nx - 1); (udiff)[i * ny + j] = (unew)[i * ny + j] - u_exact(x, y); } } error = r8mat_rms(nx, ny, udiff); double error1; // Set the right hand side array F. rhs(nx, ny, f, block_size); /* Set the initial solution estimate UNEW. We are "allowed" to pick up the boundary conditions exactly. */ for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1) { (unew)[i * ny + j] = (f)[i * ny + j]; } else { (unew)[i * ny + j] = 0.0; } } } sweep_seq(nx, ny, dx, dy, f, 0, niter, u, unew); // Check for convergence. for (j = 0; j < ny; j++) { y = (double) (j) / (double) (ny - 1); for (i = 0; i < nx; i++) { x = (double) (i) / (double) (nx - 1); (udiff)[i * ny + j] = (unew)[i * ny + j] - u_exact(x, y); } } error1 = r8mat_rms(nx, ny, udiff); params->succeed = fabs(error - error1) < 1.0E-6; free(udiff); } free(f); free(u); free(unew); return TIMER; } /* R8MAT_RMS returns the RMS norm of a vector stored as a matrix. */ double r8mat_rms(int nx, int ny, double *a_) { double (*a)[nx][ny] = (double (*)[nx][ny])a_; int i; int j; double v; v = 0.0; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { v = v + (*a)[i][j] * (*a)[i][j]; } } v = sqrt(v / (double) (nx * ny)); return v; } /* RHS initializes the right hand side "vector". */ void rhs(int nx, int ny, double *f, int block_size) { int i,ii; int j,jj; double x; double y; // The "boundary" entries of F store the boundary values of the solution. // The "interior" entries of F store the right hand sides of the Poisson equation. #pragma omp parallel { #pragma omp single for (j = 0; j < ny; j+=block_size) { for (i = 0; i < nx; i+=block_size) { #pragma omp task firstprivate(block_size,i,j,nx,ny) private(ii,jj,x,y) for (jj=j; jj<j+block_size; ++jj) { y = (double) (jj) / (double) (ny - 1); for (ii=i; ii<i+block_size; ++ii) { x = (double) (ii) / (double) (nx - 1); if (ii == 0 || ii == nx - 1 || jj == 0 || jj == ny - 1) (f)[ii * ny + jj] = u_exact(x, y); else (f)[ii * ny + jj] = - uxxyy_exact(x, y); } } } } } } /* Evaluates the exact solution. */ double u_exact(double x, double y) { double pi = 3.141592653589793; double value; value = sin(pi * x * y); return value; } /* Evaluates (d/dx d/dx + d/dy d/dy) of the exact solution. */ double uxxyy_exact(double x, double y) { double pi = 3.141592653589793; double value; value = - pi * pi * (x * x + y * y) * sin(pi * x * y); return value; }
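The timed kernel above hands the heavy work to sweep(), which is declared in poisson.h/main.h and not shown in this file. A minimal sketch of one un-blocked step, assuming the usual five-point Jacobi relaxation this Burkardt benchmark family applies (jacobi_step_sketch is a hypothetical helper name, not part of the benchmark):

/* Hypothetical sketch: one Jacobi step over the interior points,
   assuming the five-point stencil used by the original poisson code.
   sweep() iterates this niter times, swapping u and unew. */
void jacobi_step_sketch(int nx, int ny, double dx, double dy,
                        const double *f, const double *u, double *unew)
{
    for (int i = 1; i < nx - 1; i++) {
        for (int j = 1; j < ny - 1; j++) {
            /* average of the four neighbours plus the scaled source term */
            unew[i * ny + j] = 0.25 * (u[(i - 1) * ny + j] + u[(i + 1) * ny + j]
                                     + u[i * ny + j - 1] + u[i * ny + j + 1]
                                     + f[i * ny + j] * dx * dy);
        }
    }
}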
omp_nested_loop1.c
#include <stdio.h> #include <omp.h> int main() { int i, j; #pragma omp parallel num_threads(4) { #pragma omp for private(j) for (i = 9; i > 6; i--) { printf("[%d] (i = %d) \n", omp_get_thread_num(), i); for (j = 0; j < 5; j++) { printf("[%d] (i , j = %d, %d)\n", omp_get_thread_num(), i, j); } } } return 0; }
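For comparison, a sketch of the same nest with collapse(2): the runtime then distributes all 15 (i, j) pairs over the 4 threads rather than only the 3 outer iterations. The per-i printf of the original has to go, since no statements may sit between collapsed loop levels:

#include <stdio.h>
#include <omp.h>

int main()
{
    /* Sketch: both loop levels fused into one 15-iteration space
       (3 outer x 5 inner), shared among 4 threads. */
    #pragma omp parallel num_threads(4)
    {
        #pragma omp for collapse(2)
        for (int i = 9; i > 6; i--) {
            for (int j = 0; j < 5; j++) {
                printf("[%d] (i , j = %d, %d)\n", omp_get_thread_num(), i, j);
            }
        }
    }
    return 0;
}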
GB_unaryop__identity_uint8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint8_int8 // op(A') function: GB_tran__identity_uint8_int8 // C type: uint8_t // A type: int8_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint8_int8 ( uint8_t *Cx, // Cx and Ax may be aliased int8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
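The only real work in this identity kernel is GB_CASTING, and it is worth seeing what that cast does: converting a negative int8_t to uint8_t wraps modulo 256, so the "identity" op is an identity on the bit pattern, not on the signed value. A standalone sketch (not part of GraphBLAS):

/* Standalone sketch: the int8_t -> uint8_t conversion performed by
   GB_CASTING wraps negative inputs modulo 256. */
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int8_t  aij = -3 ;
    uint8_t cij = (uint8_t) aij ;   /* GB_CASTING: -3 becomes 253 */
    printf ("%d -> %u\n", (int) aij, (unsigned) cij) ;
    return (0) ;
}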
raytracer.h
#pragma once #include "resource.h" #include <linalg.h> #include <memory> #include <omp.h> #include <random> #include <time.h> using namespace linalg::aliases; namespace cg::renderer { struct ray { ray(float3 position, float3 direction) : position(position) { this->direction = normalize(direction); } float3 position; float3 direction; }; struct payload { float t; float3 bary; cg::color color; }; template<typename VB> struct triangle { triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c); float3 a; float3 b; float3 c; float3 ba; float3 ca; float3 na; float3 nb; float3 nc; float3 ambient; float3 diffuse; float3 emissive; }; template<typename VB> inline triangle<VB>::triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c) { a = float3{ vertex_a.x, vertex_a.y, vertex_a.z }; b = float3{ vertex_b.x, vertex_b.y, vertex_b.z }; c = float3{ vertex_c.x, vertex_c.y, vertex_c.z }; ba = b - a; ca = c - a; na = float3{ vertex_a.nx, vertex_a.ny, vertex_a.nz }; nb = float3{ vertex_b.nx, vertex_b.ny, vertex_b.nz }; nc = float3{ vertex_c.nx, vertex_c.ny, vertex_c.nz }; ambient = { vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b, }; diffuse = { vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b, }; emissive = { vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b, }; } template<typename VB> class aabb { public: void add_triangle(const triangle<VB> triangle); const std::vector<triangle<VB>>& get_triangles() const; bool aabb_test(const ray& ray) const; protected: std::vector<triangle<VB>> triangles; float3 aabb_min; float3 aabb_max; }; struct light { float3 position; float3 color; }; template<typename VB, typename RT> class raytracer { public: raytracer(){}; ~raytracer(){}; void set_render_target(std::shared_ptr<resource<RT>> in_render_target); void clear_render_target(const RT& in_clear_value); void set_viewport(size_t in_width, size_t in_height); void set_per_shape_vertex_buffer( std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer); void build_acceleration_structure(); std::vector<aabb<VB>> acceleration_structures; void ray_generation(float3 position, float3 direction, float3 right, float3 up, float frame_weight = 1.f); payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const; payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const; std::function<payload(const ray& ray)> miss_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> closest_hit_shader = nullptr; std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)> any_hit_shader = nullptr; protected: std::shared_ptr<cg::resource<RT>> render_target; std::vector<std::shared_ptr<cg::resource<VB>>> per_shape_vertex_buffer; float get_random(const int thread_num, float range = 0.1f) const; size_t width = 1920; size_t height = 1080; }; template<typename VB, typename RT> inline void raytracer<VB, RT>::set_render_target(std::shared_ptr<resource<RT>> in_render_target) { render_target = in_render_target; } template<typename VB, typename RT> inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value) { for (size_t i = 0; i < render_target->get_number_of_elements(); i++) { render_target->item(i) = in_clear_value; } } template<typename VB, typename RT> inline void raytracer<VB, RT>::set_per_shape_vertex_buffer( std::vector<std::shared_ptr<cg::resource<VB>>> in_per_shape_vertex_buffer) { per_shape_vertex_buffer = 
in_per_shape_vertex_buffer;
	}

	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::build_acceleration_structure()
	{
		for (auto& vertex_buffer : per_shape_vertex_buffer)
		{
			size_t vertex_id = 0;
			aabb<VB> aabb;
			while (vertex_id < vertex_buffer->get_number_of_elements())
			{
				// Read the three vertices in a defined order. The original
				// passed item(vertex_id++) three times in one call; argument
				// evaluation order is unspecified, so the triangle's vertices
				// could be permuted (and pre-C++17 this was undefined behavior).
				const VB& vertex_a = vertex_buffer->item(vertex_id++);
				const VB& vertex_b = vertex_buffer->item(vertex_id++);
				const VB& vertex_c = vertex_buffer->item(vertex_id++);
				triangle<VB> triangle(vertex_a, vertex_b, vertex_c);
				aabb.add_triangle(triangle);
			}
			acceleration_structures.push_back(aabb);
		}
	}

	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height)
	{
		width = in_width;
		height = in_height;
	}

	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::ray_generation(
			float3 position, float3 direction, float3 right, float3 up, float frame_weight)
	{
		for (int x = 0; x < static_cast<int>(width); x++)
		{
#pragma omp parallel for
			for (int y = 0; y < static_cast<int>(height); y++)
			{
				float x_jitter = 0; // get_random(omp_get_thread_num() + clock(), 0.05f);
				float y_jitter = 0; // get_random(omp_get_thread_num() + clock() + 1, 0.05f);
				float u = (x * 2.f + x_jitter) / static_cast<float>(width - 1) - 1.f;
				u *= static_cast<float>(width) / static_cast<float>(height);
				float v = (y * 2.f + y_jitter) / static_cast<float>(height - 1) - 1.f;
				float3 ray_direction = direction + u * right - v * up;
				ray ray(position, ray_direction);
				payload payload = trace_ray(ray, 1);

				cg::color accumed = cg::color::from_float3(render_target->item(x, y).to_float3());
				cg::color result{
						payload.color.r * frame_weight + accumed.r * (1.f - frame_weight),
						payload.color.g * frame_weight + accumed.g * (1.f - frame_weight),
						payload.color.b * frame_weight + accumed.b * (1.f - frame_weight)};
				render_target->item(x, y) = RT::from_color(result);
			}
		}
	}

	template<typename VB, typename RT>
	inline payload raytracer<VB, RT>::trace_ray(const ray& ray, size_t depth, float max_t, float min_t) const
	{
		if (depth == 0)
		{
			return miss_shader(ray);
		}
		depth--;
		payload closest_hit_payload = {};
		closest_hit_payload.t = max_t;
		const triangle<VB>* closest_triangle = nullptr;

		for (auto& aabb : acceleration_structures)
		{
			if (aabb.aabb_test(ray))
			{
				for (auto& triangle : aabb.get_triangles())
				{
					payload payload = intersection_shader(triangle, ray);
					if (payload.t > min_t && payload.t < closest_hit_payload.t)
					{
						closest_hit_payload = payload;
						closest_triangle = &triangle;
						if (any_hit_shader)
						{
							return any_hit_shader(ray, payload, triangle);
						}
					}
				}
			}
		}
		if (closest_hit_payload.t < max_t)
		{
			if (closest_hit_shader)
			{
				return closest_hit_shader(ray, closest_hit_payload, *closest_triangle);
			}
		}
		return miss_shader(ray);
	}

	template<typename VB, typename RT>
	inline payload raytracer<VB, RT>::intersection_shader(const triangle<VB>& triangle, const ray& ray) const
	{
		payload payload{};
		payload.t = -1.f;

		float3 pvec = cross(ray.direction, triangle.ca);
		float det = dot(triangle.ba, pvec);
		if (det > -1e-8 && det < 1e-8)
			return payload;
		float inv_det = 1.f / det;
		float3 tvec = ray.position - triangle.a;
		float u = dot(tvec, pvec) * inv_det;
		if (u < 0.f || u > 1.f)
			return payload;
		float3 qvec = cross(tvec, triangle.ba);
		float v = dot(ray.direction, qvec) * inv_det;
		if (v < 0.f || u + v > 1.f)
			return payload;
		payload.t = dot(triangle.ca, qvec) * inv_det;
		payload.bary = {1.f - u - v, u, v};
		return payload;
	}

	template<typename VB, typename RT>
	inline float raytracer<VB, RT>::get_random(const int thread_num, const float range) const
	{
		static std::default_random_engine generator(thread_num);
		static std::normal_distribution<float> distribution(0.f, range);
		return
distribution(generator); } template<typename VB> inline void aabb<VB>::add_triangle(const triangle<VB> triangle) { if (triangles.empty()) { aabb_min = aabb_max = triangle.a; } aabb_min = min(aabb_min, min(min(triangle.a, triangle.b), triangle.c)); aabb_max = max(aabb_max, max(max(triangle.a, triangle.b), triangle.c)); triangles.push_back(triangle); } template<typename VB> inline const std::vector<triangle<VB>>& aabb<VB>::get_triangles() const { return triangles; } template<typename VB> inline bool aabb<VB>::aabb_test(const ray& ray) const { float3 inv_ray_direction = float3(1.f) / ray.direction; float3 t0 = (aabb_max - ray.position) * inv_ray_direction; float3 t1 = (aabb_min - ray.position) * inv_ray_direction; float3 tmin = min(t0, t1); float3 tmax = max(t0, t1); return maxelem(tmin) <= minelem(tmax); } } // namespace cg::renderer
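The intersection_shader above is the Möller–Trumbore ray/triangle test. Writing $o$ for ray.position and $d$ for ray.direction, and using the precomputed edges $ba = b-a$ and $ca = c-a$ (pvec, tvec, qvec in the code are $d\times ca$, $o-a$, and $(o-a)\times ba$), it solves

$$o + t\,d \;=\; a + u\,(b-a) + v\,(c-a)$$

by Cramer's rule with $\det = (b-a)\cdot\bigl(d\times(c-a)\bigr)$:

$$u=\frac{(o-a)\cdot\bigl(d\times(c-a)\bigr)}{\det},\qquad
v=\frac{d\cdot\bigl((o-a)\times(b-a)\bigr)}{\det},\qquad
t=\frac{(c-a)\cdot\bigl((o-a)\times(b-a)\bigr)}{\det},$$

rejecting the hit when $\det\approx 0$ (ray parallel to the triangle's plane), $u\notin[0,1]$, $v<0$, or $u+v>1$; the barycentric weights stored in payload.bary are $(1-u-v,\,u,\,v)$.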
7746.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"

/* Array initialization. */
static
void init_array (int m, int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int i, j;

  *float_n = 1.2;

  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      data[i][j] = ((DATA_TYPE) i*j) / M;
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int i, j;

  for (i = 0; i < m; i++)
    for (j = 0; j < m; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
      if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static
void kernel_correlation(int m, int n,
                        DATA_TYPE float_n,
                        DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                        DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                        DATA_TYPE POLYBENCH_1D(mean,M,m),
                        DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
  int i, j, j1, j2;

  DATA_TYPE eps = 0.1f;

#define sqrt_of_array_cell(x,j) sqrt(x[j])

#pragma scop
  /* Determine mean of column vectors of input data matrix */
  /* NOTE: #P11 below is a tuning placeholder left in by the benchmark
     generator, not valid C; substitute a concrete thread count before
     compiling. */
  #pragma omp parallel private(i, j, j2) num_threads(#P11)
  {
    #pragma omp for
    for (j = 0; j < _PB_M; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
    }

    /* Determine standard deviations of column vectors of data matrix. */
    #pragma omp for
    for (j = 0; j < _PB_M; j++)
    {
      stddev[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
      stddev[j] /= float_n;
      stddev[j] = sqrt_of_array_cell(stddev, j);
      /* The following is an inelegant but usual way to handle
         near-zero std. dev. values, which below would cause a zero-
         divide. */
      stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
    }

    /* Center and reduce the column vectors. */
    #pragma omp for
    for (i = 0; i < _PB_N; i++)
      for (j = 0; j < _PB_M; j++)
      {
        data[i][j] -= mean[j];
        data[i][j] /= sqrt(float_n) * stddev[j];
      }

    /* Calculate the m * m correlation matrix. */
    #pragma omp for
    for (j1 = 0; j1 < _PB_M-1; j1++)
    {
      symmat[j1][j1] = 1.0;
      for (j2 = j1+1; j2 < _PB_M; j2++)
      {
        symmat[j1][j2] = 0.0;
        for (i = 0; i < _PB_N; i++)
          symmat[j1][j2] += (data[i][j1] * data[i][j2]);
        symmat[j2][j1] = symmat[j1][j2];
      }
    }
  }
#pragma endscop
  symmat[_PB_M-1][_PB_M-1] = 1.0;
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int n = N;
  int m = M;

  /* Variable declaration/allocation. */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);

  /* Initialize array(s). */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_correlation (m, n, float_n,
                      POLYBENCH_ARRAY(data),
                      POLYBENCH_ARRAY(symmat),
                      POLYBENCH_ARRAY(mean),
                      POLYBENCH_ARRAY(stddev));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument.
*/ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
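As noted in the kernel comment above, num_threads(#P11) leaves this file uncompilable as-is: #P11 is a placeholder the benchmark generator expects an autotuner to fill in. A sketch of one possible instantiation (P11 and the value 8 are hypothetical, not part of PolyBench):

/* Sketch only: how the num_threads(#P11) placeholder might be
   instantiated once a tuner has chosen a thread count. */
#include <omp.h>
#define P11 8   /* hypothetical autotuner-chosen value */

void instantiation_example(void)
{
    #pragma omp parallel num_threads(P11)
    {
        /* ... kernel body from kernel_correlation() ... */
    }
}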
XT_OffsetError.c
/* ============================================================================ * Copyright (c) 2015 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <stdio.h> #include "nrutil.h" #include "XT_Constants.h" #include "XT_Structures.h" #include <mpi.h> #include <math.h> #include "XT_IOMisc.h" #include "invert.h" #include "allocate.h" void gen_offset_constraint_windows (Sinogram* SinogramPtr, TomoInputs* TomoInputsPtr) { int32_t r_size, t_size, num = 0, i, j, k, l, dim[4], N_t_node, N_r, N_t, node_rank, node_num, node_idx, k_idx, l_idx; char constraint_file[100] = "proj_constraint"; node_rank = TomoInputsPtr->node_rank; node_num = TomoInputsPtr->node_num; N_r = SinogramPtr->N_r; N_t_node = SinogramPtr->N_t; N_t = N_t_node*node_num; r_size = 2*N_r/((int32_t)(sqrt(N_r) + 0.5)); t_size = 2*N_t/((int32_t)(sqrt(N_t) + 0.5)); for (i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) num++; SinogramPtr->off_constraint = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, num, N_r, N_t_node); memset(&(SinogramPtr->off_constraint[0][0][0]), 0, num*N_r*N_t_node*sizeof(Real_arr_t)); for (num = 0, i = 0; i <= N_r - r_size/2; i = i + r_size/2) for (j = 0; j <= N_t - t_size/2; j = j + t_size/2) { for (k = i; k < i + r_size; k++) for (l = j; l < j + t_size; l++) { node_idx = node_rank*N_t_node; k_idx = k % N_r; l_idx = l % N_t; if (l_idx >= node_idx && l_idx < node_idx + N_t_node) { SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] = (k-i) < r_size/2 ? (k-i+1): r_size-(k-i); SinogramPtr->off_constraint[num][k_idx][l_idx-node_idx] *= (l-j) < t_size/2 ? 
(l-j+1): t_size-(l-j);
				}
			}
			num++;
		}

	SinogramPtr->off_constraint_num = num;
	dim[0] = 1; dim[1] = num; dim[2] = SinogramPtr->N_r; dim[3] = SinogramPtr->N_t;
	/* Build the output filename in one step. The original called
	   sprintf(constraint_file, "%s_n%d", constraint_file, ...), passing the
	   destination buffer as a source argument, which is undefined behavior. */
	sprintf(constraint_file, "proj_constraint_n%d", node_rank);
	if (TomoInputsPtr->Write2Tiff == 1)
		WriteMultiDimArray2Tiff (constraint_file, dim, 0, 1, 2, 3, &(SinogramPtr->off_constraint[0][0][0]), 0, TomoInputsPtr->debug_file_ptr);
	fprintf(TomoInputsPtr->debug_file_ptr, "gen_offset_constraint_windows: r_size = %d, t_size = %d, number of constraints = %d\n", r_size, t_size, SinogramPtr->off_constraint_num);
/*	SinogramPtr->off_constraint_size = SinogramPtr->N_r;
	SinogramPtr->off_constraint = (Real_t**)multialloc(sizeof(Real_t), 2, 1, SinogramPtr->N_r);
	for (j = 0; j < SinogramPtr->N_r; j++)
		SinogramPtr->off_constraint[0][j] = 1;
	SinogramPtr->off_constraint_num = 1;*/
}

void constrained_quad_opt (Real_t** Lambda, Real_t** b, Real_arr_t*** A, Real_arr_t** x, int32_t Nr, int32_t Nt, int32_t M, TomoInputs* TomoInputsPtr)
{
	Real_t **D, **Dinv;
	Real_t *temp, *temp2;
	int32_t i, j, k, l;

	D = (Real_t**)multialloc(sizeof(Real_t), 2, M, M);
	Dinv = (Real_t**)multialloc(sizeof(Real_t), 2, M, M);
	temp = (Real_t*)get_spc(M, sizeof(Real_t));
	temp2 = (Real_t*)get_spc(M, sizeof(Real_t));
	memset(&(D[0][0]), 0, M*M*sizeof(Real_t));
	memset(&(Dinv[0][0]), 0, M*M*sizeof(Real_t));

	#pragma omp parallel for collapse(2) private(k, l)
	for (i = 0; i < M; i++)
	for (j = 0; j < M; j++)
	for (k = 0; k < Nr; k++)
	for (l = 0; l < Nt; l++)
	{
		D[i][j] += A[i][k][l]*A[j][k][l]/Lambda[k][l];
		/*sum += A[i][k]*A[j][k]/Lambda[k];*/
	}

/*	TomoInputsPtr->t0_mpired1 = time(NULL); */
	MPI_Allreduce(&(D[0][0]), &(Dinv[0][0]), M*M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
/*	TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/
/*	printf("Checksum is %f\n", sum);*/
	invert2(Dinv, M);

	#pragma omp parallel for private(j, k)
	for (i = 0; i < M; i++)
	{
		temp[i] = 0;
		for (j = 0; j < Nr; j++)
		for (k = 0; k < Nt; k++)
			temp[i] += A[i][j][k]*b[j][k]/Lambda[j][k];
	}

/*	TomoInputsPtr->t0_mpired1 = time(NULL);*/
	MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);
/*	TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/

	#pragma omp parallel for private(j)
	for (i = 0; i < M; i++)
	{
		temp[i] = 0;
		for (j = 0; j < M; j++)
			temp[i] += Dinv[i][j]*temp2[j];
	}

/*	TomoInputsPtr->t0_mpired1 = time(NULL);
	MPI_Allreduce(&(temp[0]), &(temp2[0]), M, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	TomoInputsPtr->time_mpired1 += difftime(time(NULL), TomoInputsPtr->t0_mpired1);*/

	#pragma omp parallel for collapse(2) private(k)
	for (i = 0; i < Nr; i++)
	for (j = 0; j < Nt; j++)
	{
		x[i][j] = 0;
		for (k = 0; k < M; k++)
			x[i][j] += A[k][i][j]*temp[k];
	}

	#pragma omp parallel for collapse(2)
	for (i = 0; i < Nr; i++)
	for (j = 0; j < Nt; j++)
		x[i][j] = (b[i][j] - x[i][j])/Lambda[i][j];

	free(temp);
	free(temp2);
	multifree(D, 2);
	multifree(Dinv, 2);
}

void compute_d_constraint (Real_arr_t*** A, Real_arr_t **d, int32_t Nr, int32_t Nt, int32_t M, FILE* debug_file_ptr)
{
	int32_t i, j, k;
	Real_t *temp, *val;

	temp = (Real_t*)get_spc(M, sizeof(Real_t));
	val = (Real_t*)get_spc(M, sizeof(Real_t));

	#pragma omp parallel for private(j, k)
	for (i = 0; i < M; i++)
	{
		temp[i] = 0;
		for (j = 0; j < Nr; j++)
		for (k = 0; k < Nt; k++)
			temp[i] += A[i][j][k]*d[j][k];
	}
	MPI_Allreduce(&(temp[0]), &(val[0]), M, MPI_REAL_DATATYPE, MPI_SUM, MPI_COMM_WORLD);

	/* include the constraint index in the log line (the original printed
	   "The i th constraint" without ever printing i) */
	for (i = 0; i < M; i++)
		fprintf(debug_file_ptr, "compute_d_constraint: constraint %d on offset error is %f\n", i, val[i]);
	free(temp);
	free(val);
}
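Read together, the three reductions in constrained_quad_opt above match the closed-form KKT solution of an equality-constrained quadratic program. This is a reading of the code, with D standing for $A\Lambda^{-1}A^{\mathsf T}$, temp for the multiplier vector, and the MPI_Allreduce calls summing the partial sums held by each rank:

$$\min_x\ \tfrac12\,x^{\mathsf T}\Lambda x - b^{\mathsf T}x
\quad\text{subject to}\quad Ax=0,$$

with $\Lambda$ the diagonal weight matrix stored entrywise in Lambda. Stationarity $\Lambda x + A^{\mathsf T}\mu = b$ combined with the constraint gives

$$\mu=(A\Lambda^{-1}A^{\mathsf T})^{-1}A\Lambda^{-1}b,\qquad
x=\Lambda^{-1}\bigl(b-A^{\mathsf T}\mu\bigr),$$

which is exactly the code's sequence: D $= A\Lambda^{-1}A^{\mathsf T}$ (inverted by invert2), temp2 $= A\Lambda^{-1}b$, temp $= D^{-1}\,$temp2, and the final two loops forming $A^{\mathsf T}\,$temp and $\Lambda^{-1}(b - A^{\mathsf T}\,$temp$)$.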
DataVectorOps.h
/*****************************************************************************
*
* Copyright (c) 2003-2020 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014-2017 by Centre for Geoscience Computing (GeoComp)
* Development from 2019 by School of Earth and Environmental Sciences
**
*****************************************************************************/

#ifndef __ESCRIPT_DATAMATHS_H__
#define __ESCRIPT_DATAMATHS_H__

#include "DataAbstract.h"
#include "DataException.h"
#include "ArrayOps.h"
#include "LapackInverseHelper.h"
#include "DataTagged.h"
#include <complex>

/**
\file DataVectorOps.h
\brief Describes binary operations performed on DataVector.

For operations on DataReady see BinaryDataReadyOp.h.
For operations on double* see ArrayOps.h.
*/

namespace escript
{

/**
In order to properly identify the datapoints, in most cases the vector, shape and offset of the point must all be supplied.
Note that vector in this context refers to a data vector storing datapoints, not a mathematical vector. (However, datapoints within the data vector could represent scalars, vectors, matrices, ...).
*/

/**
\brief Perform a matrix multiply of the given views.

NB: Only multiplies together the two given datapoints; this would need to be called over all data-points to multiply the entire Data objects involved.

\param left,right - vectors containing the datapoints
\param leftShape,rightShape - shapes of datapoints in the vectors
\param leftOffset,rightOffset - beginnings of datapoints in the vectors
\param result - Vector to store the resulting datapoint in
\param resultShape - expected shape of the resulting datapoint
*/
ESCRIPT_DLL_API
void matMult(const DataTypes::RealVectorType& left,
             const DataTypes::ShapeType& leftShape,
             DataTypes::RealVectorType::size_type leftOffset,
             const DataTypes::RealVectorType& right,
             const DataTypes::ShapeType& rightShape,
             DataTypes::RealVectorType::size_type rightOffset,
             DataTypes::RealVectorType& result,
             const DataTypes::ShapeType& resultShape);
// Hmmmm why is there no offset for the result??

/**
\brief Determine the shape of the result array for a matrix multiplication of the given views.
\param left,right - shapes of the left and right matrices
\return the shape of the matrix which would result from multiplying left and right
*/
ESCRIPT_DLL_API
DataTypes::ShapeType determineResultShape(const DataTypes::ShapeType& left,
                                          const DataTypes::ShapeType& right);

/**
\brief computes a symmetric matrix from your square matrix A: (A + transpose(A)) / 2
\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
template<typename VEC>
inline
void
symmetric(const VEC& in,
          const DataTypes::ShapeType& inShape,
          typename VEC::size_type inOffset,
          VEC& ev,
          const DataTypes::ShapeType& evShape,
          typename VEC::size_type evOffset)
{
   if (DataTypes::getRank(inShape) == 2) {
      int i0, i1;
      int s0=inShape[0];
      int s1=inShape[1];
      for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
            ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] =
               (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)] +
                in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]) / 2.0;
         }
      }
   }
   else if (DataTypes::getRank(inShape) == 4) {
      int i0, i1, i2, i3;
      int s0=inShape[0];
      int s1=inShape[1];
      int s2=inShape[2];
      int s3=inShape[3];
      for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
            for (i2=0; i2<s2; i2++) {
               for (i3=0; i3<s3; i3++) {
                  ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] =
                     (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)] +
                      in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]) / 2.0;
               }
            }
         }
      }
   }
}

/**
\brief computes an antisymmetric matrix from your square matrix A: (A - transpose(A)) / 2
\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
template<typename VEC>
inline
void
antisymmetric(const VEC& in,
              const DataTypes::ShapeType& inShape,
              typename VEC::size_type inOffset,
              VEC& ev,
              const DataTypes::ShapeType& evShape,
              typename VEC::size_type evOffset)
{
   if (DataTypes::getRank(inShape) == 2) {
      int i0, i1;
      int s0=inShape[0];
      int s1=inShape[1];
      for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
            ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] =
               (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)] -
                in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]) / 2.0;
         }
      }
   }
   else if (DataTypes::getRank(inShape) == 4) {
      int i0, i1, i2, i3;
      int s0=inShape[0];
      int s1=inShape[1];
      int s2=inShape[2];
      int s3=inShape[3];
      for (i0=0; i0<s0; i0++) {
         for (i1=0; i1<s1; i1++) {
            for (i2=0; i2<s2; i2++) {
               for (i3=0; i3<s3; i3++) {
                  ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] =
                     (in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)] -
                      in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]) / 2.0;
               }
            }
         }
      }
   }
}

/**
\brief computes a hermitian matrix from your square matrix A: (A + adjoint(A)) / 2
\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
void hermitian(const DataTypes::CplxVectorType& in,
               const DataTypes::ShapeType& inShape,
               DataTypes::CplxVectorType::size_type inOffset,
               DataTypes::CplxVectorType& ev,
               const DataTypes::ShapeType& evShape,
               DataTypes::CplxVectorType::size_type evOffset);

/**
\brief computes an antihermitian matrix from your square matrix A: (A - adjoint(A)) / 2
\param in - vector containing the matrix A
\param inShape - shape of the matrix A
\param inOffset - the beginning of A within the vector in
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing ev in vector ev
*/
void antihermitian(const DataTypes::CplxVectorType& in,
                   const DataTypes::ShapeType& inShape,
                   typename DataTypes::CplxVectorType::size_type inOffset,
                   DataTypes::CplxVectorType& ev,
                   const DataTypes::ShapeType& evShape,
                   typename DataTypes::CplxVectorType::size_type evOffset);

/**
\brief computes the trace of a matrix
\param in - vector containing the input matrix
\param inShape - shape of the input matrix
\param inOffset - the beginning of the input matrix within the vector "in"
\param ev - vector to store the output matrix
\param evShape - expected shape of the output matrix
\param evOffset - starting location for storing the output matrix in vector ev
\param axis_offset
*/
template <class VEC>
inline
void
trace(const VEC& in,
      const DataTypes::ShapeType& inShape,
      typename VEC::size_type inOffset,
      VEC& ev,
      const DataTypes::ShapeType& evShape,
      typename VEC::size_type evOffset,
      int axis_offset)
{
   for (int j=0;j<DataTypes::noValues(evShape);++j)
   {
      ev[evOffset+j]=0;
   }
   if (DataTypes::getRank(inShape) == 2) {
      int s0=inShape[0]; // Python wrapper limits to square matrix
      int i;
      for (i=0; i<s0; i++) {
         ev[evOffset/*+DataTypes::getRelIndex(evShape)*/] += in[inOffset+DataTypes::getRelIndex(inShape,i,i)];
      }
   }
   else if (DataTypes::getRank(inShape) == 3) {
      if (axis_offset==0) {
         int s0=inShape[0];
         int s2=inShape[2];
         int i0, i2;
         for (i0=0; i0<s0; i0++) {
            for (i2=0; i2<s2; i2++) {
               ev[evOffset+DataTypes::getRelIndex(evShape,i2)] += in[inOffset+DataTypes::getRelIndex(inShape,i0,i0,i2)];
            }
         }
      }
      else if (axis_offset==1) {
         int s0=inShape[0];
         int s1=inShape[1];
         int i0, i1;
         for (i0=0; i0<s0; i0++) {
            for (i1=0; i1<s1; i1++) {
               ev[evOffset+DataTypes::getRelIndex(evShape,i0)] += in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i1)];
            }
         }
      }
   }
   else if (DataTypes::getRank(inShape) == 4) {
      if (axis_offset==0) {
         int s0=inShape[0];
         int s2=inShape[2];
         int s3=inShape[3];
         int i0, i2, i3;
         for (i0=0; i0<s0; i0++) {
            for (i2=0; i2<s2; i2++) {
               for (i3=0; i3<s3; i3++) {
                  ev[evOffset+DataTypes::getRelIndex(evShape,i2,i3)] += in[inOffset+DataTypes::getRelIndex(inShape,i0,i0,i2,i3)];
               }
            }
         }
      }
      else if (axis_offset==1) {
         int s0=inShape[0];
         int s1=inShape[1];
         int s3=inShape[3];
         int i0, i1, i3;
         for (i0=0; i0<s0; i0++) {
            for (i1=0; i1<s1; i1++) {
               for (i3=0; i3<s3; i3++) {
                  ev[evOffset+DataTypes::getRelIndex(evShape,i0,i3)] += in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i1,i3)];
               }
            }
         }
      }
      else if (axis_offset==2) {
         int s0=inShape[0];
         int s1=inShape[1];
         int s2=inShape[2];
         int i0, i1, i2;
         for (i0=0; i0<s0; i0++) {
            for (i1=0; i1<s1; i1++) {
               for (i2=0; i2<s2; i2++) {
                  ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] += in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i2)];
               }
            }
         }
      }
   }
}

/**
\brief Transpose each data point of this Data object around the given axis.
\param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the output matrix \param evShape - expected shape of the output matrix \param evOffset - starting location for storing the output matrix in vector ev \param axis_offset */ // ESCRIPT_DLL_API can only export template specialisation! template <class VEC> inline void transpose(const VEC& in, const DataTypes::ShapeType& inShape, typename VEC::size_type inOffset, VEC& ev, const DataTypes::ShapeType& evShape, typename VEC::size_type evOffset, int axis_offset) { int inRank=DataTypes::getRank(inShape); if ( inRank== 4) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int s3=evShape[3]; int i0, i1, i2, i3; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i3,i0,i1,i2)]; } } } } } else if (axis_offset==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i3,i0,i1)]; } } } } } else if (axis_offset==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i2,i3,i0)]; } } } } } else { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2,i3)]; } } } } } } else if (inRank == 3) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int i0, i1, i2; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i0,i1)]; } } } } else if (axis_offset==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i2,i0)]; } } } } else { // Copy the matrix unchanged for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i2)]; } } } } } else if (inRank == 2) { int s0=evShape[0]; int s1=evShape[1]; int i0, i1; if (axis_offset==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]; } } } else { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1)]; } } } } else if (inRank == 1) { int s0=evShape[0]; int i0; for (i0=0; i0<s0; i0++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0)] = in[inOffset+DataTypes::getRelIndex(inShape,i0)]; } } else if (inRank == 0) { ev[evOffset/*+DataTypes::getRelIndex(evShape,)*/] = in[inOffset/*+DataTypes::getRelIndex(inShape,)*/]; } else { throw DataException("Error - DataArrayView::transpose can only be calculated for rank 0, 1, 2, 3 or 4 objects."); } } /** \brief swaps the components axis0 and axis1. 
\param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the output matrix \param evShape - expected shape of the output matrix \param evOffset - starting location for storing the output matrix in vector ev \param axis0 - axis index \param axis1 - axis index */ // ESCRIPT_DLL_API can only export template specialisation! template <class VEC> inline void swapaxes(const VEC& in, const DataTypes::ShapeType& inShape, typename VEC::size_type inOffset, VEC& ev, const DataTypes::ShapeType& evShape, typename VEC::size_type evOffset, int axis0, int axis1) { int inRank=DataTypes::getRank(inShape); if (inRank == 4) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int s3=evShape[3]; int i0, i1, i2, i3; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0,i2,i3)]; } } } } } else if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i1,i0,i3)]; } } } } } else if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i3,i1,i2,i0)]; } } } } } } else if (axis0==1) { if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i2,i1,i3)]; } } } } } else if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i3,i2,i1)]; } } } } } } else if (axis0==2) { if (axis1==3) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { for (i3=0; i3<s3; i3++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2,i3)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i1,i3,i2)]; } } } } } } } else if ( inRank == 3) { int s0=evShape[0]; int s1=evShape[1]; int s2=evShape[2]; int i0, i1, i2; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0,i2)]; } } } } else if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i2,i1,i0)]; } } } } } else if (axis0==1) { if (axis1==2) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { for (i2=0; i2<s2; i2++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1,i2)] = in[inOffset+DataTypes::getRelIndex(inShape,i0,i2,i1)]; } } } } } } else if ( inRank == 2) { int s0=evShape[0]; int s1=evShape[1]; int i0, i1; if (axis0==0) { if (axis1==1) { for (i0=0; i0<s0; i0++) { for (i1=0; i1<s1; i1++) { ev[evOffset+DataTypes::getRelIndex(evShape,i0,i1)] = in[inOffset+DataTypes::getRelIndex(inShape,i1,i0)]; } } } } } else { throw DataException("Error - DataArrayView::swapaxes can only be calculated for 
rank 2, 3 or 4 objects."); } } /** \brief solves a local eigenvalue problem \param in - vector containing the input matrix \param inShape - shape of the input matrix \param inOffset - the beginning of the input matrix within the vector "in" \param ev - vector to store the eigenvalues \param evShape - expected shape of the eigenvalues \param evOffset - starting location for storing the eigenvalues in vector ev */ ESCRIPT_DLL_API inline void eigenvalues(const DataTypes::RealVectorType& in, const DataTypes::ShapeType& inShape, typename DataTypes::RealVectorType::size_type inOffset, DataTypes::RealVectorType& ev, const DataTypes::ShapeType& evShape, typename DataTypes::RealVectorType::size_type evOffset) { typename DataTypes::RealVectorType::ElementType in00,in10,in20,in01,in11,in21,in02,in12,in22; typename DataTypes::RealVectorType::ElementType ev0,ev1,ev2; int s=inShape[0]; if (s==1) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; eigenvalues1(in00,&ev0); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; } else if (s==2) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; eigenvalues2(in00,(in01+in10)/2.,in11,&ev0,&ev1); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; } else if (s==3) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)]; in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)]; in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)]; in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)]; eigenvalues3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22, &ev0,&ev1,&ev2); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2; } } inline void eigenvalues(const DataTypes::CplxVectorType& in, const DataTypes::ShapeType& inShape, typename DataTypes::CplxVectorType::size_type inOffset, DataTypes::CplxVectorType& ev, const DataTypes::ShapeType& evShape, typename DataTypes::CplxVectorType::size_type evOffset) { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" typename DataTypes::CplxVectorType::ElementType in00,in10,in20,in01,in11,in21,in02,in12,in22; typename DataTypes::CplxVectorType::ElementType ev0,ev1,ev2; #pragma clang diagnostic pop int s=inShape[0]; if (s==1) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; eigenvalues1(in00,&ev0); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; } else if (s==2) { in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)]; in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)]; eigenvalues2(in00,(in01+in10)/2.,in11,&ev0,&ev1); ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0; ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1; } else if (s==3) { // this doesn't work yet // in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)]; // in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)]; // in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)]; // 
in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)];
//      in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)];
//      in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)];
//      in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)];
//      in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)];
//      in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)];
//      eigenvalues3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22,
//                   &ev0,&ev1,&ev2);
//      ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0;
//      ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1;
//      ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2;
   }
}

/**
\brief solves a local eigenvalue problem
\param in - vector containing the input matrix
\param inShape - shape of the input matrix
\param inOffset - the beginning of the input matrix within the vector "in"
\param ev - vector to store the eigenvalues
\param evShape - expected shape of the eigenvalues
\param evOffset - starting location for storing the eigenvalues in ev
\param V - vector to store the eigenvectors
\param VShape - expected shape of the eigenvectors
\param VOffset - starting location for storing the eigenvectors in V
\param tol - Input - eigenvalues with relative difference tol are treated as equal
*/
ESCRIPT_DLL_API
inline
void
eigenvalues_and_eigenvectors(const DataTypes::RealVectorType& in, const DataTypes::ShapeType& inShape,
                             DataTypes::RealVectorType::size_type inOffset,
                             DataTypes::RealVectorType& ev, const DataTypes::ShapeType& evShape,
                             DataTypes::RealVectorType::size_type evOffset,
                             DataTypes::RealVectorType& V, const DataTypes::ShapeType& VShape,
                             DataTypes::RealVectorType::size_type VOffset,
                             const double tol=1.e-13)
{
   double in00,in10,in20,in01,in11,in21,in02,in12,in22;
   double V00,V10,V20,V01,V11,V21,V02,V12,V22;
   double ev0,ev1,ev2;
   int s=inShape[0];
   if (s==1) {
      in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)];
      eigenvalues_and_eigenvectors1(in00,&ev0,&V00,tol);
      ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0;
      // store the eigenvectors at VOffset, as documented (the original
      // indexed V with inOffset, which is only correct when the two
      // offsets happen to coincide)
      V[VOffset+DataTypes::getRelIndex(VShape,0,0)]=V00;
   }
   else if (s==2) {
      in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)];
      in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)];
      in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)];
      in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)];
      eigenvalues_and_eigenvectors2(in00,(in01+in10)/2.,in11,
                                    &ev0,&ev1,&V00,&V10,&V01,&V11,tol);
      ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0;
      ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1;
      V[VOffset+DataTypes::getRelIndex(VShape,0,0)]=V00;
      V[VOffset+DataTypes::getRelIndex(VShape,1,0)]=V10;
      V[VOffset+DataTypes::getRelIndex(VShape,0,1)]=V01;
      V[VOffset+DataTypes::getRelIndex(VShape,1,1)]=V11;
   }
   else if (s==3) {
      in00=in[inOffset+DataTypes::getRelIndex(inShape,0,0)];
      in10=in[inOffset+DataTypes::getRelIndex(inShape,1,0)];
      in20=in[inOffset+DataTypes::getRelIndex(inShape,2,0)];
      in01=in[inOffset+DataTypes::getRelIndex(inShape,0,1)];
      in11=in[inOffset+DataTypes::getRelIndex(inShape,1,1)];
      in21=in[inOffset+DataTypes::getRelIndex(inShape,2,1)];
      in02=in[inOffset+DataTypes::getRelIndex(inShape,0,2)];
      in12=in[inOffset+DataTypes::getRelIndex(inShape,1,2)];
      in22=in[inOffset+DataTypes::getRelIndex(inShape,2,2)];
      eigenvalues_and_eigenvectors3(in00,(in01+in10)/2.,(in02+in20)/2.,in11,(in21+in12)/2.,in22,
                                    &ev0,&ev1,&ev2,
                                    &V00,&V10,&V20,&V01,&V11,&V21,&V02,&V12,&V22,tol);
      ev[evOffset+DataTypes::getRelIndex(evShape,0)]=ev0;
      ev[evOffset+DataTypes::getRelIndex(evShape,1)]=ev1;
      ev[evOffset+DataTypes::getRelIndex(evShape,2)]=ev2;
      V[VOffset+DataTypes::getRelIndex(VShape,0,0)]=V00;
V[VOffset+DataTypes::getRelIndex(VShape,1,0)]=V10;
      V[VOffset+DataTypes::getRelIndex(VShape,2,0)]=V20;
      V[VOffset+DataTypes::getRelIndex(VShape,0,1)]=V01;
      V[VOffset+DataTypes::getRelIndex(VShape,1,1)]=V11;
      V[VOffset+DataTypes::getRelIndex(VShape,2,1)]=V21;
      V[VOffset+DataTypes::getRelIndex(VShape,0,2)]=V02;
      V[VOffset+DataTypes::getRelIndex(VShape,1,2)]=V12;
      V[VOffset+DataTypes::getRelIndex(VShape,2,2)]=V22;
   }
}

/**
   Inline function definitions.
*/

template <class VEC>
inline
bool
checkOffset(const VEC& data,
            const DataTypes::ShapeType& shape,
            typename VEC::size_type offset)
{
    return (data.size() >= (offset+DataTypes::noValues(shape)));
}

/**
 * This assumes that all data involved have the same points per sample and same shape
 */
template <class ResVEC, class LVEC, class RSCALAR>
void binaryOpVectorRightScalar(ResVEC& res, // where result is to be stored
          typename ResVEC::size_type resOffset, // offset in the result vector to start storing results
          const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result
          const typename ResVEC::size_type sampleSize, // number of values in each sample
          const LVEC& left, // LHS of calculation
          typename LVEC::size_type leftOffset, // where to start reading LHS values
          const RSCALAR* right, // RHS of the calculation
          const bool rightreset, // true if RHS is providing a single sample of 1 value only
          escript::ES_optype operation, // operation to perform
          bool singleleftsample) // set to false for normal operation
{
  size_t substep=(rightreset?0:1);
  switch (operation)
  {
    case ADD:
    {
      #pragma omp parallel for
      for (typename ResVEC::size_type i=0;i<samplesToProcess;++i)
      {
          typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize);
          const RSCALAR* rpos=right+(rightreset?0:i*substep);
          for (typename ResVEC::size_type j=0;j<sampleSize;++j)
          {
              res[i*sampleSize+resOffset+j]=left[leftbase+j]+*rpos;
          }
      }
    }
    break;
    case POW:
    {
      #pragma omp parallel for
      for (typename ResVEC::size_type i=0;i<samplesToProcess;++i)
      {
          typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize);
          const RSCALAR* rpos=right+(rightreset?0:i*substep);
          for (typename ResVEC::size_type j=0;j<sampleSize;++j)
          {
              res[i*sampleSize+resOffset+j]=pow(left[leftbase+j],*rpos);
          }
      }
    }
    break;
    case SUB:
    {
      #pragma omp parallel for
      for (typename ResVEC::size_type i=0;i<samplesToProcess;++i)
      {
          typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize);
          const RSCALAR* rpos=right+(rightreset?0:i*substep);
          for (typename ResVEC::size_type j=0;j<sampleSize;++j)
          {
              res[i*sampleSize+resOffset+j]=left[leftbase+j]-*rpos;
          }
      }
    }
    break;
    case MUL:
    {
      #pragma omp parallel for
      for (typename ResVEC::size_type i=0;i<samplesToProcess;++i)
      {
          typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize);
          const RSCALAR* rpos=right+(rightreset?0:i*substep);
          for (typename ResVEC::size_type j=0;j<sampleSize;++j)
          {
              res[i*sampleSize+resOffset+j]=left[leftbase+j] * *rpos;
          }
      }
    }
    break;
    case DIV:
    {
      #pragma omp parallel for
      for (typename ResVEC::size_type i=0;i<samplesToProcess;++i)
      {
          typename LVEC::size_type leftbase=leftOffset+(singleleftsample?0:i*sampleSize);
          const RSCALAR* rpos=right+(rightreset?0:i*substep);
          for (typename ResVEC::size_type j=0;j<sampleSize;++j)
          {
              res[i*sampleSize+resOffset+j]=left[leftbase+j]/ *rpos;
          }
      }
    }
    break;
    default:
      throw DataException("Unsupported binary operation");
  }
}

template<>
void binaryOpVectorRightScalar(DataTypes::RealVectorType& res, // where result is to be stored
          typename DataTypes::RealVectorType::size_type
resOffset, // offset in the result vector to start storing results const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type sampleSize, // number of values in each sample const DataTypes::RealVectorType& left, // LHS of calculation typename DataTypes::RealVectorType::size_type leftOffset, // where to start reading LHS values const DataTypes::real_t* right, // RHS of the calculation const bool rightreset, // true if RHS is providing a single sample of 1 value only escript::ES_optype operation, // operation to perform bool singleleftsample); /** * This assumes that all data involved have the same points per sample and same shape */ template <class ResVEC, class LSCALAR, class RVEC> void binaryOpVectorLeftScalar(ResVEC& res, // where result is to be stored typename ResVEC::size_type resOffset, // offset in the result vector to start storing results const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type sampleSize, // number of values in each sample const LSCALAR* left, // LHS of calculation const bool leftreset, // true if LHS is providing a single sample of 1 value only const RVEC& right, // RHS of the calculation typename RVEC::size_type rightOffset, // where to start reading RHS values escript::ES_optype operation, // operation to perform bool singlerightsample) // right consists of a single sample { size_t substep=(leftreset?0:1); switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos+right[rightbase+j]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=pow(*lpos,right[rightbase+j]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos-right[rightbase+j]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos*right[rightbase+j]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename RVEC::size_type rightbase=rightOffset+(singlerightsample?0:i*sampleSize); const LSCALAR* lpos=left+(leftreset?0:i*substep); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=*lpos/right[rightbase+j]; } } } break; default: throw DataException("Unsupported binary operation"); } } template <> void binaryOpVectorLeftScalar(DataTypes::RealVectorType& res, // where result is to be stored typename 
DataTypes::RealVectorType::size_type resOffset, // offset in the result vector to start storing results const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type sampleSize, // number of values in each sample const DataTypes::real_t* left, // LHS of calculation const bool leftreset, // true if LHS is providing a single sample of 1 value only const DataTypes::RealVectorType& right, // RHS of the calculation typename DataTypes::RealVectorType::size_type rightOffset, // where to start reading RHS values escript::ES_optype operation, // operation to perform bool singlerightsample); // right consists of a single sample /** * This assumes that all data involved have the same points per sample and same shape */ template <class ResVEC, class LVEC, class RVEC> void binaryOpVector(ResVEC& res, // where result is to be stored typename ResVEC::size_type resOffset, // offset in the result vector to start storing results const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type sampleSize, // number of values in each sample const LVEC& left, // LHS of calculation typename LVEC::size_type leftOffset, // where to start reading LHS values const bool leftreset, // Is LHS only supplying a single sample instead of a bunch of them const RVEC& right, // RHS of the calculation typename RVEC::size_type rightOffset, // where to start reading RHS values const bool rightreset, // Is RHS only supplying a single sample instead of a bunch of them escript::ES_optype operation) // operation to perform { switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]+right[rightbase+j]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=pow(left[leftbase+j],right[rightbase+j]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]-right[rightbase+j]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { res[i*sampleSize+resOffset+j]=left[leftbase+j]*right[rightbase+j]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<samplesToProcess;++i) { typename LVEC::size_type leftbase=leftOffset+(leftreset?0:i*sampleSize); typename RVEC::size_type rightbase=rightOffset+(rightreset?0:i*sampleSize); for (typename ResVEC::size_type j=0;j<sampleSize;++j) { 
res[i*sampleSize+resOffset+j]=left[leftbase+j]/right[rightbase+j]; } } } break; default: throw DataException("Unsupported binary operation"); } } template <> void binaryOpVector(DataTypes::RealVectorType& res, // where result is to be stored typename DataTypes::RealVectorType::size_type resOffset, // offset in the result vector to start storing results const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type sampleSize, // number of values in each sample const DataTypes::RealVectorType& left, // LHS of calculation typename DataTypes::RealVectorType::size_type leftOffset, // where to start reading LHS values const bool leftreset, // Is LHS only supplying a single sample instead of a bunch of them const DataTypes::RealVectorType& right, // RHS of the calculation typename DataTypes::RealVectorType::size_type rightOffset, // where to start reading RHS values const bool rightreset, // Is RHS only supplying a single sample instead of a bunch of them escript::ES_optype operation); // operation to perform #define OPVECLAZYBODY(X) \ for (size_t j=0;j<onumsteps;++j)\ {\ for (size_t i=0;i<numsteps;++i,res+=resultStep) \ { \ for (size_t s=0; s<chunksize; ++s)\ {\ res[s] = X;\ }\ /* tensor_binary_operation< TYPE >(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, X);*/ \ lroffset+=leftstep; \ rroffset+=rightstep; \ }\ lroffset+=oleftstep;\ rroffset+=orightstep;\ } /** * This assumes that all data involved have the same points per sample and same shape * This version is to be called from within DataLazy. * It does not have openmp around loops because it will be evaluating individual samples * (Which will be done within an enclosing openmp region. */ template <class ResELT, class LELT, class RELT> void binaryOpVectorLazyArithmeticHelper(ResELT* res, const LELT* left, const RELT* right, const size_t chunksize, const size_t onumsteps, const size_t numsteps, const size_t resultStep, const size_t leftstep, const size_t rightstep, const size_t oleftstep, const size_t orightstep, size_t lroffset, size_t rroffset, escript::ES_optype operation) // operation to perform { switch (operation) { case ADD: OPVECLAZYBODY((left[lroffset+s]+right[rroffset+s])); break; case POW: OPVECLAZYBODY(pow(left[lroffset+s],right[rroffset+s])) break; case SUB: OPVECLAZYBODY(left[lroffset+s]-right[rroffset+s]) break; case MUL: OPVECLAZYBODY(left[lroffset+s]*right[rroffset+s]) break; case DIV: OPVECLAZYBODY(left[lroffset+s]/right[rroffset+s]) break; default: ESYS_ASSERT(false, "Invalid operation. This should never happen!"); // I can't throw here because this will be called inside a parallel section } } /** * This assumes that all data involved have the same points per sample and same shape * This version is to be called from within DataLazy. * It does not have openmp around loops because it will be evaluating individual samples * (Which will be done within an enclosing openmp region. 
*/ template <class ResELT, class LELT, class RELT> void binaryOpVectorLazyRelationalHelper(ResELT* res, const LELT* left, const RELT* right, const size_t chunksize, const size_t onumsteps, const size_t numsteps, const size_t resultStep, const size_t leftstep, const size_t rightstep, const size_t oleftstep, const size_t orightstep, size_t lroffset, size_t rroffset, escript::ES_optype operation) // operation to perform { switch (operation) { case LESS: OPVECLAZYBODY(left[lroffset+s]<right[rroffset+s]) break; case GREATER: OPVECLAZYBODY(left[lroffset+s]>right[rroffset+s]) break; case GREATER_EQUAL: OPVECLAZYBODY(left[lroffset+s]>=right[rroffset+s]) break; case LESS_EQUAL: OPVECLAZYBODY(left[lroffset+s]<=right[rroffset+s]) break; default: ESYS_ASSERT(false, "Invalid operation. This should never happen!"); // I can't throw here because this will be called inside a parallel section } } /** * This assumes that all data involved have the same points per sample and same shape */ /* trying to make a single version for all Tagged+Expanded interactions */ template <class ResVEC, class LVEC, class RVEC> void binaryOpVectorTagged(ResVEC& res, // where result is to be stored const typename ResVEC::size_type samplesToProcess, // number of samples to be updated in the result const typename ResVEC::size_type DPPSample, // number of datapoints per sample const typename ResVEC::size_type DPSize, // datapoint size const LVEC& left, // LHS of calculation bool leftscalar, const RVEC& right, // RHS of the calculation bool rightscalar, bool lefttagged, // true if left object is the tagged one const DataTagged& tagsource, // where to get tag offsets from escript::ES_optype operation) // operation to perform { typename ResVEC::size_type lstep=leftscalar?1:DPSize; typename ResVEC::size_type rstep=rightscalar?1:DPSize; typename ResVEC::size_type limit=samplesToProcess*DPPSample; switch (operation) { case ADD: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]+right[rightbase+j*(!rightscalar)]; } } } break; case POW: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=pow(left[leftbase+j*(!leftscalar)],right[rightbase+j*(!rightscalar)]); } } } break; case SUB: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]-right[rightbase+j*(!rightscalar)]; } } } break; case MUL: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type 
rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]*right[rightbase+j*(!rightscalar)]; } } } break; case DIV: { #pragma omp parallel for for (typename ResVEC::size_type i=0;i<limit;++i) { typename LVEC::size_type leftbase=(lefttagged?tagsource.getPointOffset(i/DPPSample,0):i*lstep); // only one of these typename RVEC::size_type rightbase=(lefttagged?i*rstep:tagsource.getPointOffset(i/DPPSample,0)); // will apply for (typename ResVEC::size_type j=0;j<DPSize;++j) { res[i*DPSize+j]=left[leftbase+j*(!leftscalar)]/right[rightbase+j*(!rightscalar)]; } } } break; default: throw DataException("Unsupported binary operation"); } } template<> void binaryOpVectorTagged(DataTypes::RealVectorType& res, // where result is to be stored const typename DataTypes::RealVectorType::size_type samplesToProcess, // number of samples to be updated in the result const typename DataTypes::RealVectorType::size_type DPPSample, // number of datapoints per sample const typename DataTypes::RealVectorType::size_type DPSize, // datapoint size const DataTypes::RealVectorType& left, // LHS of calculation const bool leftscalar, const DataTypes::RealVectorType& right, // RHS of the calculation const bool rightscalar, const bool lefttagged, // true if left object is the tagged one const DataTagged& tagsource, // where to get tag offsets from escript::ES_optype operation); /** \brief Perform the given data point reduction operation on the data point specified by the given offset into the view. Reduces all elements of the data point using the given operation, returning the result as a scalar. Operation must be a pointer to a function. Called by escript::algorithm. \param left - vector containing the datapoint \param shape - shape of datapoints in the vector \param offset - beginning of datapoint in the vector \param operation - Input - Operation to apply. Must be a pointer to a function. 
\param initial_value
*/
template <class BinaryFunction>
inline DataTypes::real_t
reductionOpVector(const DataTypes::RealVectorType& left,
                  const DataTypes::ShapeType& leftShape,
                  DataTypes::RealVectorType::size_type offset,
                  BinaryFunction operation,
                  DataTypes::real_t initial_value)
{
  ESYS_ASSERT((left.size()>0)&&checkOffset(left,leftShape,offset),
              "Couldn't perform reductionOp due to insufficient storage.");
  DataTypes::real_t current_value=initial_value;
  for (DataTypes::RealVectorType::size_type i=0;i<DataTypes::noValues(leftShape);i++)
  {
    current_value=operation(current_value,left[offset+i]);
  }
  return current_value;
}

template <class BinaryFunction>
inline DataTypes::real_t
reductionOpVector(const DataTypes::CplxVectorType& left,
                  const DataTypes::ShapeType& leftShape,
                  DataTypes::CplxVectorType::size_type offset,
                  BinaryFunction operation,
                  DataTypes::real_t initial_value)
{
  ESYS_ASSERT((left.size()>0)&&checkOffset(left,leftShape,offset),
              "Couldn't perform reductionOp due to insufficient storage.");
  DataTypes::real_t current_value=initial_value;
  for (DataTypes::RealVectorType::size_type i=0;i<DataTypes::noValues(leftShape);i++)
  {
    current_value=operation(current_value,left[offset+i]);
  }
  return current_value;
}

/**
   \brief computes the inverses of square (up to 3x3) matrices

   \param in - vector containing the input matrices
   \param inShape - shape of the input matrices
   \param inOffset - the beginning of the input matrices within the vector "in"
   \param out - vector to store the inverses
   \param outShape - expected shape of the inverses
   \param outOffset - starting location for storing the inverses in out
   \param count - number of matrices to invert
   \param helper - associated working storage

   \exception DataException if input and output are not the correct shape or if any of the matrices are not invertible.
   \return 0 on success, on failure the return value should be passed to matrixInverseError(int err).
*/
int
matrix_inverse(const DataTypes::RealVectorType& in,
               const DataTypes::ShapeType& inShape,
               DataTypes::RealVectorType::size_type inOffset,
               DataTypes::RealVectorType& out,
               const DataTypes::ShapeType& outShape,
               DataTypes::RealVectorType::size_type outOffset,
               int count,
               LapackInverseHelper& helper);

/**
   \brief throws an appropriate exception based on failure of matrix_inverse.

   \param err - error code returned from matrix_inverse
   \warning do not call in a parallel region since it throws.
*/
void matrixInverseError(int err);

/**
   \brief returns true if the vector contains NaN
*/
inline bool vectorHasNaN(const DataTypes::RealVectorType& in, DataTypes::RealVectorType::size_type inOffset, size_t count)
{
  for (size_t z=inOffset;z<inOffset+count;++z)
  {
    if (nancheck(in[z]))
    {
      return true;
    }
  }
  return false;
}

inline bool vectorHasNaN(const DataTypes::CplxVectorType& in, DataTypes::CplxVectorType::size_type inOffset, size_t count)
{
  for (size_t z=inOffset;z<inOffset+count;++z)
  {
    if (nancheck(in[z]))
    {
      return true;
    }
  }
  return false;
}

} // end namespace escript

#endif // __ESCRIPT_DATAMATHS_H__
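The reset/offset indexing convention used throughout the binaryOpVector family above is easy to get wrong, so the following is a minimal, self-contained analogue of the ADD branch. It is a sketch only: it assumes plain std::vector<double> in place of DataTypes::RealVectorType, drops the escript exception machinery, and the function and variable names are local to this example. When a reset flag is true, the corresponding operand contributes a single sample that is reused for every output sample.

// add_samples.cpp -- standalone analogue of the ADD branch of binaryOpVector.
// leftreset/rightreset mirror the "single sample" flags in the header above.
#include <cstdio>
#include <vector>

void addSamples(std::vector<double>& res, size_t resOffset,
                size_t samplesToProcess, size_t sampleSize,
                const std::vector<double>& left, size_t leftOffset, bool leftreset,
                const std::vector<double>& right, size_t rightOffset, bool rightreset)
{
    #pragma omp parallel for
    for (size_t i = 0; i < samplesToProcess; ++i)
    {
        // a reset operand does not advance: the same sample is read every iteration
        size_t leftbase  = leftOffset  + (leftreset  ? 0 : i * sampleSize);
        size_t rightbase = rightOffset + (rightreset ? 0 : i * sampleSize);
        for (size_t j = 0; j < sampleSize; ++j)
            res[i * sampleSize + resOffset + j] = left[leftbase + j] + right[rightbase + j];
    }
}

int main()
{
    std::vector<double> left{1, 2, 3, 4, 5, 6};   // three samples of size 2
    std::vector<double> right{10, 20};            // one sample, reused (rightreset=true)
    std::vector<double> res(6);
    addSamples(res, 0, 3, 2, left, 0, false, right, 0, true);
    for (double v : res) std::printf("%g ", v);   // prints: 11 22 13 24 15 26
    std::printf("\n");
    return 0;
}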
transpose.c
/*
Copyright (c) 2013, Intel Corporation

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
  contributors may be used to endorse or promote products derived
  from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

/*******************************************************************

NAME:    transpose

PURPOSE: This OpenMP program measures the time for the transpose of a
         column-major stored matrix into a row-major stored matrix.

USAGE:   Program input is three command line arguments that give the
         number of threads to use, the number of times to repeat the
         operation (iterations), and the matrix order:

         transpose <# threads> <# iterations> <matrix order> [tile size]

         An optional parameter specifies the tile size used to divide the
         individual matrix blocks for improved cache and TLB performance.

         The output consists of diagnostics to make sure the transpose
         worked and timing statistics.

FUNCTIONS CALLED:

         Other than OpenMP or standard C functions, the following
         functions are used in this program:

         wtime()           portable wall-timer interface.
         bail_out()
         test_results()    Verify that the transpose worked

HISTORY: Written by Tim Mattson, April 1999.
         Updated by Rob Van der Wijngaart, December 2005.

*******************************************************************/

#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>

/* Constant to shift column index */
#define COL_SHIFT  1000.00
/* Constant to shift row index */
#define ROW_SHIFT  0.001

static double test_results (int , double*);

int main(int argc, char ** argv) {

  int    order;         /* order of the matrix                             */
  int    tile_size=32;  /* default tile size for tiling of local transpose */
  int    iterations;    /* number of times to do the transpose             */
  int    i, j, it, jt, iter;  /* dummies                                   */
  double bytes;         /* combined size of matrices                       */
  double * RESTRICT A;  /* buffer to hold original matrix                  */
  double * RESTRICT B;  /* buffer to hold transposed matrix                */
  double errsq;         /* squared error                                   */
  double epsilon=1.e-8; /* error tolerance                                 */
  double trans_time,    /* timing parameters                               */
         avgtime = 0.0,
         maxtime = 0.0,
         mintime = 366.0*24.0*3600.0; /* set the minimum time to a large value;
                                         one leap year should be enough    */
  int    nthread_input, nthread;
  int    num_error=0;   /* flag that signals that requested and obtained
                           numbers of threads are the same                 */

  /*********************************************************************
  ** read and test input parameters
  *********************************************************************/

  if (argc != 4 && argc != 5){
    printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n", *argv);
    exit(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);

  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }

  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: iterations must be >= 1 : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  order = atoi(*++argv);
  if (order <= 0){
    printf("ERROR: Matrix Order must be greater than 0 : %d \n", order);
    exit(EXIT_FAILURE);
  }

  if (argc == 5) tile_size = atoi(*++argv);
  /* a non-positive tile size means no tiling of the local transpose */
  if (tile_size <= 0) tile_size = order;

  /*********************************************************************
  ** Allocate space for the input and transpose matrix
  *********************************************************************/

  A = (double *)malloc(order*order*sizeof(double));
  if (A == NULL){
    printf(" Error allocating space for input matrix\n");
    exit(EXIT_FAILURE);
  }
  B = (double *)malloc(order*order*sizeof(double));
  if (B == NULL){
    printf(" Error allocating space for transposed matrix\n");
    exit(EXIT_FAILURE);
  }

  bytes = 2.0 * sizeof(double) * order * order;

  #pragma omp parallel private (iter)
  {
  #pragma omp master
  {
  nthread = omp_get_num_threads();

  printf("OpenMP Matrix transpose: B = A^T\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ", nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads     = %i;\n",nthread_input);
    printf("Matrix order          = %d\n", order);
    if (tile_size < order) printf("Tile size             = %d\n", tile_size);
    else                   printf("Untiled\n");
    printf("Number of iterations  = %d\n", iterations);
  }
  }
  bail_out(num_error);

  /* Fill the original matrix, set transpose to known garbage value.
*/ #pragma omp for private (j) for (i=0;i<order; i++) { for (j=0;j<order;j++) { *(A+i*order+j) = COL_SHIFT * j + ROW_SHIFT * i; *(B+i*order+j) = -1.0; } } errsq = 0.0; for (iter = 0; iter<iterations; iter++){ #pragma omp barrier #pragma omp master { trans_time = wtime(); } /* Transpose the matrix; only use tiling if the tile size is smaller than the matrix */ if (tile_size < order) { #pragma omp for private (j, it, jt) for (i=0; i<order; i+=tile_size) { for (j=0; j<order; j+=tile_size) { for (it=i; it<MIN(order,i+tile_size); it++){ for (jt=j; jt<MIN(order,j+tile_size);jt++){ B[it+order*jt] = A[jt+order*it]; } } } } } else { #pragma omp for private (j) for (i=0;i<order; i++) { for (j=0;j<order;j++) { B[i+order*j] = A[j+order*i]; } } } #pragma omp master { trans_time = wtime() - trans_time; #ifdef VERBOSE printf("\nFinished with transpose, using %lf seconds \n", trans_time); #endif if (iter>0 || iterations==1) { /* skip the first iteration */ avgtime = avgtime + trans_time; mintime = MIN(mintime, trans_time); maxtime = MAX(maxtime, trans_time); } } errsq += test_results (order, B); } /* end of iter loop */ } /* end of OpenMP parallel region */ /********************************************************************* ** Analyze and output results. *********************************************************************/ if (errsq < epsilon) { printf("Solution validates\n"); avgtime = avgtime/(double)(MAX(iterations-1,1)); printf("Rate (MB/s): %lf, Avg time (s): %lf, Min time (s): %lf", 1.0E-06 * bytes/mintime, avgtime, mintime); printf(", Max time (s): %lf\n", maxtime); #ifdef VERBOSE printf("Squared errors: %f \n", errsq); #endif exit(EXIT_SUCCESS); } else { printf("ERROR: Aggregate squared error %lf exceeds threshold %e\n", errsq, epsilon); exit(EXIT_FAILURE); } } /* end of main */ /* function that computes the error committed during the transposition */ double test_results (int order, double *trans) { double diff, errsq=0.0; int i,j; #pragma omp parallel for private(j,diff) reduction(+:errsq) for (i=0;i<order; i++) { for (j=0;j<order;j++) { diff = *(trans+i*order+j) - (COL_SHIFT*i + ROW_SHIFT * j); errsq += diff*diff; } } #ifdef VERBOSE #pragma omp master { printf(" Squared sum of differences: %f\n",errsq); } #endif return errsq; }
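The heart of the benchmark is the tiled loop nest; pulled out of the PRK harness it reads as below. This is an illustrative sketch (the PRK headers, wtime() and bail_out() are omitted, and the small driver in main() is hypothetical). It shows the index convention B[it+order*jt] = A[jt+order*it]: tiles of A are read along one index and written along the other into B, so each tile stays resident in cache while both matrices are touched.

/* tiled_transpose.c -- standalone sketch of the tiled loop nest used above. */
#include <stdio.h>
#define MIN(a,b) ((a) < (b) ? (a) : (b))

/* Transpose a column-major stored order x order matrix A into
   a row-major stored matrix B, working tile by tile.           */
void tiled_transpose(int order, int tile_size, const double *A, double *B)
{
    #pragma omp parallel for
    for (int i = 0; i < order; i += tile_size)
        for (int j = 0; j < order; j += tile_size)
            for (int it = i; it < MIN(order, i + tile_size); it++)
                for (int jt = j; jt < MIN(order, j + tile_size); jt++)
                    B[it + order * jt] = A[jt + order * it];
}

int main(void)
{
    enum { order = 4, tile = 2 };
    double A[order * order], B[order * order];
    for (int k = 0; k < order * order; k++) A[k] = (double)k;
    tiled_transpose(order, tile, A, B);
    /* B[it + order*jt] now equals A[jt + order*it] for all it, jt */
    for (int it = 0; it < order; it++) {
        for (int jt = 0; jt < order; jt++) printf("%5.0f", B[it + order * jt]);
        printf("\n");
    }
    return 0;
}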
c_pi.c
/*
***********************************************************************
  This program is part of the
  OpenMP Source Code Repository

  http://www.pcg.ull.es/ompscr/
  e-mail: ompscr@etsii.ull.es

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  (LICENSE file) along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

FILE:          c_pi.c
VERSION:       1.0
DATE:          May 2004
COMMENTS TO:   sande@csi.ull.es
DESCRIPTION:   Parallel implementation of PI generator using OpenMP
COMMENTS:      The area under the curve y=4/(1+x*x) between 0 and 1 provides
               a way to compute Pi. The value of this integral can be
               approximated using a sum.
REFERENCES:    http://en.wikipedia.org/wiki/Pi
               http://nereida.deioc.ull.es/~llCoMP/examples/examples/pi/pi_description.html
BASIC PRAGMAS: parallel
USAGE:         ./c_pi.par
INPUT:         Default precision
OUTPUT:        The value of PI
FILE FORMATS:  -
RESTRICTIONS:  -
REVISION HISTORY:
**************************************************************************
*/

//#include "OmpSCR.h"
#include <omp.h>
#include <stdio.h>

#define DEFAULT_PREC 1000000   /* Default precision */
#define NUM_ARGS     1
#define NUM_TIMERS   1

int main(int argc, char *argv[]) {
  double PI25DT = 3.141592653589793238462643;
  double local, w, total_time, pi;
  long i, N;   /* Precision */
  int NUMTHREADS;
  // char *PARAM_NAMES[NUM_ARGS] = {"Precision"};
  // char *DEFAULTS_VALUE[NUM_ARGS] = {"1000000"};
  // char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};

  /* Default: DEFAULT_PREC; */
  NUMTHREADS = 1; //omp_get_num_threads();
  //OSCR_init (NUMTHREADS, "Pi generator", "Param: precision", NUM_ARGS, PARAM_NAMES, DEFAULTS_VALUE , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv);
  //N = OSCR_getarg_int(NUM_ARGS);
  N = DEFAULT_PREC;  /* N was read via OSCR_getarg_int before the OSCR calls were
                        commented out; without this assignment it is used
                        uninitialized in the division below */
  //OSCR_timer_start(0);
  w = 1.0 / N;
  pi = 0.0;
/* #pragma omp parallel for default(shared) private(i, local) reduction(+:pi) schedule(static, 1) */
#pragma omp parallel for default(shared) private(i, local) reduction(+:pi)
  for(i = 0; i < N; i++) {
    local = (i + 0.5) * w;
    pi += 4.0 / (1.0 + local * local);
  }
  pi *= w;
  //OSCR_timer_stop(0);
  total_time = 1; //OSCR_timer_read(0);
  //OSCR_report();
  printf("\n \t# THREADS INTERVAL \tTIME (secs.) \tPI \t\t\tERROR\n");
  printf("\t %d \t%10ld \t%14.6lf \t%1.20f\t%g\n",
         NUMTHREADS, N, total_time, pi, PI25DT-pi);
  return 0;
}

/*
 * vim:ts=2:sw=2:
 */
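The loop approximates pi = integral from 0 to 1 of 4/(1+x*x) dx by the midpoint rule: with w = 1/N, pi is approximated by w * sum over i = 0..N-1 of 4/(1 + ((i+0.5)*w)^2), and the reduction(+:pi) clause lets each thread accumulate a private partial sum that OpenMP combines at the end. Assuming a GCC-style toolchain (the output name below just follows the header's USAGE line), it can be built and run as:

cc -O2 -fopenmp c_pi.c -o c_pi.par
OMP_NUM_THREADS=4 ./c_pi.par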
density.h
#ifndef __DENSITY_H__
#define __DENSITY_H__

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <iostream>
#include <vector>
#include "Node.h"
#include "wtime.h"

#if 0
#define SLOW
#endif

struct Density
{
  typedef boundary<float> Boundary;
  Particle::Vector density;
  Boundary BBox;  /* bounding box */

  struct cmp_particle_key
  {
    bool operator () (const Particle &a, const Particle &b)
    {
      return a.key.val < b.key.val;
    }
  };
  struct cmp_particle_ID
  {
    bool operator () (const Particle &a, const Particle &b)
    {
      return a.ID < b.ID;
    }
  };

  Density(const Particle::Vector &ptcl_in, const int Nuse, const int Nngb = 32)
  {
    const double t0 = wtime();
    std::vector<Particle> &ptcl = Node::ptcl;
    ptcl.reserve(Nuse);
    const int Nin = ptcl_in.size();
    assert(Nuse <= Nin);
    const float fac = std::max(1.0f, (float)Nin/(float)Nuse);

    /* import particles and compute the Bounding Box */
    for (int i = 0; i < Nuse; i++)
    {
      ptcl.push_back(ptcl_in[(int)(i * fac)]);
      BBox.merge(Boundary(ptcl.back().pos));
    }
    std::cerr << BBox.min << std::endl;
    std::cerr << BBox.max << std::endl;
    const vec3 vsize = BBox.hlen();
    const float rsize = std::max(vsize.x, std::max(vsize.y, vsize.z)) * 2.0f;

    /* now build the tree */
    const int nbody = Nuse;
    for (int i = 0; i < nbody; i++)
      ptcl[i].compute_key(BBox.min, rsize);
    std::sort(ptcl.begin(), ptcl.end(), cmp_particle_key());

    Node::Node_heap.push_back(Node());
    Node &root = Node::Node_heap[0];
    for (int i = 0; i < nbody; i++)
      root.push_particle(i, 60);

#if 1   /* if h's are not known, this sets up an estimated range */
    const float volume = rsize*rsize*rsize;
    root.set_init_h(float(Nngb), volume);
#endif
    root.make_boundary();

#ifdef SLOW
#pragma omp parallel for
    for(int i=0; i<nbody; i++)
      ptcl[i] << root;
#else  /* FAST */
#ifdef _OPENMP
    std::vector<Node *> group_list;
    root.find_group_Node(2000, group_list);
#pragma omp parallel for schedule(dynamic)
    for(int i=0; i<(int)group_list.size(); i++)
      *group_list[i] << root;
#else
    root << root;
#endif /* _OPENMP */
#endif /* SLOW */

    const double t1 = wtime();
    fprintf(stderr, " -- Density done in %g sec [ %g ptcl/sec ]\n",
            t1 - t0, Nuse/(t1 - t0));
  };
};

#endif /* __DENSITY_H__ */
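Two details of the constructor above are worth spelling out. When Nuse < Nin, the input is thinned with a constant stride fac = max(1, Nin/Nuse), so the subsample spans the whole input rather than taking a prefix. And the FAST path walks the tree per group of roughly 2000 particles under schedule(dynamic), which load-balances the irregular per-group cost far better than one loop iteration per particle. A tiny standalone check of the stride mapping (plain floats stand in for Particle; names are local to this example):

// subsample_stride.cpp -- illustrates the i*fac index thinning used above.
#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    std::vector<float> in(10);
    for (int i = 0; i < 10; i++) in[i] = (float)i;

    const int Nuse = 4;   // keep 4 of the 10 particles
    const float fac = std::max(1.0f, (float)in.size() / (float)Nuse);
    for (int i = 0; i < Nuse; i++)
        std::printf("%g ", in[(int)(i * fac)]);   // prints: 0 2 5 7
    std::printf("\n");
    return 0;
}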
jacobi.c
// // Implementation of the iterative Jacobi method. // // Given a known, diagonally dominant matrix A and a known vector b, we aim to // to find the vector x that satisfies the following equation: // // Ax = b // // We first split the matrix A into the diagonal D and the remainder R: // // (D + R)x = b // // We then rearrange to form an iterative solution: // // x' = (b - Rx) / D // // More information: // -> https://en.wikipedia.org/wiki/Jacobi_method // #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <x86intrin.h> #include <immintrin.h> #include <omp.h> static int N; static int MAX_ITERATIONS; static int SEED; static double CONVERGENCE_THRESHOLD; const int NUM_THREADS = 16; #define SEPARATOR "------------------------------------\n" // Return the current time in seconds since the Epoch double get_timestamp(); // Parse command line arguments to set solver parameters void parse_arguments(int argc, char *argv[]); // Run the Jacobi solver // Returns the number of iterations performed int run(float* A, float* b, float* x, float* xtmp) { int itr; float diff; float sqdiff; float *ptrtmp; float convergence_threshold_square = CONVERGENCE_THRESHOLD * CONVERGENCE_THRESHOLD; float* d = malloc(N*sizeof(float)); for (int row = 0; row < N; row++) d[row] = 1 / A[row * N + row]; int row, ridx, col; float dot = 0.0; // Loop until converged or maximum iterations reached itr = 0; do { // Perfom Jacobi iteration #pragma omp parallel for private(dot, ridx, col) for (row = 0; row < N; row++) { ridx = row * N; dot = 0.0; for (col = 0; col < row; col++) dot += A[ridx + col] * x[col]; for (col = row + 1; col < N; col++) dot += A[ridx + col] * x[col]; xtmp[row] = (b[row] - dot) * d[row]; } // Swap pointers ptrtmp = x; x = xtmp; xtmp = ptrtmp; // Check for convergence sqdiff = 0.0; for (row = 0; row < N; row++) { diff = xtmp[row] - x[row]; sqdiff += diff * diff; } itr++; } while ((itr < MAX_ITERATIONS) && (sqdiff > convergence_threshold_square)); return itr; } int main(int argc, char *argv[]) { parse_arguments(argc, argv); float *A = malloc(N*N*sizeof(float)); float *ta = malloc(N*N*sizeof(float)); float *b = malloc(N*sizeof(float)); float *x = malloc(N*sizeof(float)); float *xtmp = malloc(N*sizeof(float)); printf(SEPARATOR); printf("Matrix size: %dx%d\n", N, N); printf("Maximum iterations: %d\n", MAX_ITERATIONS); printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD); printf(SEPARATOR); double total_start = get_timestamp(); // Initialize data srand(SEED); int row, col; for (row = 0; row < N; row++) { int ridx = row * N; float rowsum = 0.0; for (col = 0; col < N; col++) { float value = rand()/(float)RAND_MAX; ta[ridx + col] = value; rowsum += value; } ta[ridx + row] += rowsum; b[row] = rand()/(float)RAND_MAX; x[row] = 0.0; } omp_set_num_threads(NUM_THREADS); #pragma omp parallel for private(col) for (row = 0; row < N; row++) { for (col = 0; col < N; col++) A[row * N + col] = ta[row * N + col]; } // Run Jacobi solver double solve_start = get_timestamp(); int itr = run(A, b, x, xtmp); double solve_end = get_timestamp(); // Check error of final solution double err = 0.0; for (int row = 0, ridx = 0; row < N; row++, ridx += N) { double tmp = 0.0; for (int col = 0; col < N; col++) { tmp += A[ridx + col] * x[col]; } tmp = b[row] - tmp; err += tmp*tmp; } err = sqrt(err); double total_end = get_timestamp(); printf("Solution error = %lf\n", err); printf("Iterations = %d\n", itr); printf("Total runtime = %lf seconds\n", (total_end-total_start)); printf("Solver 
runtime = %lf seconds\n", (solve_end-solve_start));
  if (itr == MAX_ITERATIONS)
    printf("WARNING: solution did not converge\n");
  printf(SEPARATOR);

  free(A);
  free(ta);
  free(b);
  free(x);
  free(xtmp);

  return 0;
}

double get_timestamp()
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return tv.tv_sec + tv.tv_usec*1e-6;
}

int parse_int(const char *str)
{
  char *next;
  int value = strtoul(str, &next, 10);
  return strlen(next) ? -1 : value;
}

double parse_double(const char *str)
{
  char *next;
  double value = strtod(str, &next);
  return strlen(next) ? -1 : value;
}

void parse_arguments(int argc, char *argv[])
{
  // Set default values
  N = 1000;
  MAX_ITERATIONS = 20000;
  CONVERGENCE_THRESHOLD = 0.0001;
  SEED = 0;

  for (int i = 1; i < argc; i++)
  {
    if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c"))
    {
      if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0)
      {
        printf("Invalid convergence threshold\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i"))
    {
      if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0)
      {
        printf("Invalid number of iterations\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n"))
    {
      if (++i >= argc || (N = parse_int(argv[i])) < 0)
      {
        printf("Invalid matrix order\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--seed") || !strcmp(argv[i], "-s"))
    {
      if (++i >= argc || (SEED = parse_int(argv[i])) < 0)
      {
        printf("Invalid seed\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
    {
      printf("\n");
      printf("Usage: ./jacobi [OPTIONS]\n\n");
      printf("Options:\n");
      printf("  -h  --help               Print this message\n");
      printf("  -c  --convergence  C     Set convergence threshold\n");
      printf("  -i  --iterations   I     Set maximum number of iterations\n");
      printf("  -n  --norder       N     Set matrix order\n");
      printf("  -s  --seed         S     Set random number seed\n");
      printf("\n");
      exit(0);
    }
    else
    {
      printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
      exit(1);
    }
  }
}
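The convergence check in run() is the one serial step left in each iteration; it can be folded into an OpenMP reduction in the same style as the update loop. A sketch under the file's own variable names (this is an alternative, not what the file ships):

/* Parallel variant of the convergence check: sqdiff must be a
   reduction variable because every thread accumulates into it. */
sqdiff = 0.0;
#pragma omp parallel for private(diff) reduction(+:sqdiff)
for (row = 0; row < N; row++)
{
  diff = xtmp[row] - x[row];
  sqdiff += diff * diff;
}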
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #define MXNET_USE_MKL_DROPOUT defined(USE_MKL) && defined(_OPENMP) && !defined(__CUDACC__) #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(mxnet::TShape()) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off).set_default(dmlc::optional<bool>(true)) .describe("Whether to turn off cudnn in dropout operator. 
" "This option is ignored if axes is specified."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { Stream<xpu> *s = ctx.get_stream<xpu>(); RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { outptr[i] = dataptr[i] * maskptr[i] * pk_1; } } // MKL backward pass inline void MKLBackward(const OpContext &ctx, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! 
* \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! \brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam &param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext &ctx, const TBlob &in, const TBlob &mask, const TBlob &out) { Stream<xpu> *s = ctx.get_stream<xpu>(); // set dropout state. ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_, seed_); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = out.Size(); stride[0] = out.Size(); stride[1] = out.Size(); stride[2] = out.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_)); // cudnn uses bits to record the positions that are dropped, so reserve bytes is always // 1/8 of input size. 
CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_) << "The size of the mask space is smaller than the required cudnn reserved space."; CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_, dropout_desc_, x_desc_, in.dptr<DType>(), y_desc_, out.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } inline void CuDNNBackward(const OpContext &ctx, const TBlob &out_grad, const TBlob &mask, const TBlob &in_grad) { Stream<xpu> *s = ctx.get_stream<xpu>(); // describe input/output tensor int dim[4], stride[4]; dim[0] = 1; dim[1] = 1; dim[2] = 1; dim[3] = in_grad.Size(); stride[0] = in_grad.Size(); stride[1] = in_grad.Size(); stride[2] = in_grad.Size(); stride[3] = 1; CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride)); CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride)); // perform dropout with cudnn CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_, dropout_desc_, dy_desc_, out_grad.dptr<DType>(), dx_desc_, in_grad.dptr<DType>(), mask.dptr<DType>(), dropout_reserve_byte_)); } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { this->dropout_passthrough_ = true; if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &in = in_data[dropout::kData]; const TBlob &out = out_data[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) { this->dropout_passthrough_ = false; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLForward(ctx, in_data, out_data); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNForward(ctx, in, mask, out); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); CHECK(req[dropout::kOut] != kAddTo); LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in.dptr<DType>(), this->pkeep_); return; } else { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in.dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, 
out.Size(), out.dptr<DType>(), in.dptr<DType>()); }); } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (!this->dropout_passthrough_) { this->dropout_passthrough_ = true; const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { #if MXNET_USE_MKL_DROPOUT if (MKLAvailable()) { MKLBackward(ctx, in_grad, out_data, out_grad); return; } #endif // MXNET_USE_MKL_DROPOUT #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) if (CuDNNAvailable()) { CuDNNBackward(ctx, grad, mask, gdata); return; } #endif // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } else { // broardcast mul mxnet::TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); }); } } private: /*! \brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! \brief Dropout mode */ dropout::DropoutOpMode mode_; /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */ mxnet::TShape axes_; /*! 
\brief Flag to record whether forward is executed in pass-through mode */ bool dropout_passthrough_; #if MXNET_USE_CUDNN_DROPOUT bool cudnn_off_; Context ctx_; cudnnDataType_t dtype_; cudnnDropoutDescriptor_t dropout_desc_; uint64_t seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) size_t dropout_reserve_byte_; cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_; #endif // MXNET_USE_CUDNN_DROPOUT }; // class DropoutOp static OpStatePtr CreateDropoutState(const nnvm::NodeAttrs &attrs, const Context ctx, const mxnet::ShapeVector &in_shapes, const std::vector<int> &in_types) { const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed); OpStatePtr state; MSHADOW_REAL_TYPE_SWITCH(in_types[dropout::kData], DType, { if (ctx.dev_type == kGPU) { state = OpStatePtr::Create<DropoutOp<gpu, DType>>(param, ctx); } else { state = OpStatePtr::Create<DropoutOp<cpu, DType>>(param, ctx); } return state; }); LOG(FATAL) << "should never reach here"; return OpStatePtr(); // should never reach here } template<typename xpu> void DropoutCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Forward(ctx, inputs, req, outputs); }); } template<typename xpu> void DropoutGradCompute(const OpStatePtr& state, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1); CHECK_EQ(req.size(), 1); std::vector<TBlob> out_grads(2); std::vector<TBlob> out_data(2); out_grads[dropout::kOut] = inputs[0]; out_data[dropout::kMask] = inputs[1]; MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>(); op.Backward(ctx, out_grads, out_data, req, outputs); }); } } // namespace op } // namespace mxnet #undef MXNET_USE_MKL_DROPOUT #endif // MXNET_OPERATOR_NN_DROPOUT_INL_H_
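The MKL path in BernoulliGenerate above relies on a skip-ahead partitioning idiom: every OpenMP thread creates its own VSL stream from the same seed and calls vslSkipAheadStream to jump to its block, so the threads draw disjoint stretches of a single logical Bernoulli sequence without any locking. A self-contained sketch of just that idiom (requires MKL; the function name is local to this example):

/* bernoulli_parallel.c -- per-thread VSL streams with skip-ahead. */
#include <mkl_vsl.h>
#include <omp.h>
#include <stdio.h>

/* Fill r[0..n) with Bernoulli(p) draws, one disjoint block per thread. */
void bernoulli_parallel(int seed, int n, double p, int *r)
{
    #pragma omp parallel
    {
        const int nthr       = omp_get_num_threads();
        const int ithr       = omp_get_thread_num();
        const int avg_amount = (n + nthr - 1) / nthr;
        const int my_offset  = ithr * avg_amount;
        const int my_end     = my_offset + avg_amount < n ? my_offset + avg_amount : n;
        const int my_amount  = my_end - my_offset;
        if (my_amount > 0) {
            VSLStreamStatePtr stream;
            vslNewStream(&stream, VSL_BRNG_MCG31, seed);
            vslSkipAheadStream(stream, my_offset);   /* jump to this thread's block */
            viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream,
                           my_amount, r + my_offset, p);
            vslDeleteStream(&stream);
        }
    }
}

int main(void)
{
    int r[100], ones = 0;
    bernoulli_parallel(17, 100, 0.5, r);
    for (int i = 0; i < 100; i++) ones += r[i];
    printf("%d of 100 draws were 1\n", ones);
    return 0;
}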
has160_fmt_plug.c
/* HAS160-512 cracker patch for JtR. Hacked together during May, 2015 * by Dhiru Kholia <dhiru.kholia at gmail.com>. * * Thanks for RHash, http://www.randombit.net/has160.html and * https://github.com/maciejczyzewski/retter for the code. */ #if FMT_EXTERNS_H extern struct fmt_main fmt__HAS160; #elif FMT_REGISTERS_H john_register_one(&fmt__HAS160); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "has160.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 64 #else #define OMP_SCALE 2048 #endif // __MIC__ #endif // OMP_SCALE #include <omp.h> #endif // _OPENMP #include "memdbg.h" #define FORMAT_LABEL "has-160" #define FORMAT_NAME "" #define ALGORITHM_NAME "HAS-160 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 40 #define BINARY_SIZE 20 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"307964ef34151d37c8047adec7ab50f4ff89762d", ""}, {"cb5d7efbca2f02e0fb7167cabb123af5795764e5", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, {"4872bcbc4cd0f0a9dc7c2f7045e5b43b6c830db8", "a"}, {"975e810488cf2a3d49838478124afce4b1c78804", "abc"}, {"2338dbc8638d31225f73086246ba529f96710bc6", "message digest"}, {"596185c9ab6703d0d0dbb98702bc0f5729cd1d3c", "abcdefghijklmnopqrstuvwxyz"}, {"07f05c8c0773c55ca3a5a695ce6aca4c438911b5", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[(BINARY_SIZE) / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; q = p; while (atoi16l[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; saved_key[index][len] 
= 0; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { has160_ctx ctx; rhash_has160_init(&ctx); rhash_has160_update(&ctx, (unsigned char*)saved_key[index], saved_len[index]); rhash_has160_final(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt__HAS160 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
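The actual hashing in crypt_all() is three calls into the bundled RHash HAS-160 implementation. A minimal standalone driver for those primitives (a sketch assuming the same has160.h ships alongside) reproduces the "abc" entry from the tests[] table above:

/* has160_demo.c -- exercises the primitives used by crypt_all(). */
#include <stdio.h>
#include <string.h>
#include "has160.h"

int main(void)
{
    unsigned char digest[20];
    has160_ctx ctx;
    const char *msg = "abc";   /* expected: 975e810488cf2a3d49838478124afce4b1c78804 */

    rhash_has160_init(&ctx);
    rhash_has160_update(&ctx, (unsigned char *)msg, strlen(msg));
    rhash_has160_final(&ctx, digest);

    for (int i = 0; i < 20; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}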
nested_serialized_task_frames.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { #pragma omp parallel num_threads(1) { // region 0 #pragma omp parallel num_threads(1) { // region 1 #pragma omp parallel num_threads(1) { // region 2 // region 2's implicit task print_ids(0); // region 1's implicit task print_ids(1); // region 0's implicit task print_ids(2); // initial task print_ids(3); } } } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_initial_task_begin: parallel_id=[[INITIAL_PARALLEL_ID:[0-9]+]], task_id=[[INITIAL_TASK_ID:[0-9]+]], actual_parallelism=1, index=1, flags=1 // region 0 // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[INITIAL_TASK_FRAME_ENTER:0x[0-f]+]], // CHECK-SAME: parallel_id=[[PARALLEL_ID_0:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_0]], task_id=[[TASK_ID_0:[0-9]+]] // region 1 // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_frame.exit=[[REGION_0_FRAME_EXIT:0x[0-f]+]], parent_task_frame.reenter=[[REGION_0_FRAME_ENTER:0x[0-f]+]], // CHECK-SAME: parallel_id=[[PARALLEL_ID_1:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_1]], task_id=[[TASK_ID_1:[0-9]+]] // region 2 // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_frame.exit=[[REGION_1_FRAME_EXIT:0x[0-f]+]], parent_task_frame.reenter=[[REGION_1_FRAME_ENTER:0x[0-f]+]], // CHECK-SAME: parallel_id=[[PARALLEL_ID_2:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID_2]], task_id=[[TASK_ID_2:[0-9]+]] // region 2's implicit task information (exit frame should be set, while enter should be NULL) // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID_2]], task_id=[[TASK_ID_2]] // CHECK-SAME: exit_frame={{0x[0-f]+}} // CHECK-SAME: reenter_frame=[[NULL]] // CHECK-SAME: task_type=ompt_task_implicit // region 1's implicit task information (both exit and enter frames should be set) // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[PARALLEL_ID_1]], task_id=[[TASK_ID_1]] // CHECK-SAME: exit_frame=[[REGION_1_FRAME_EXIT]] // CHECK-SAME: reenter_frame=[[REGION_1_FRAME_ENTER]] // CHECK-SAME: task_type=ompt_task_implicit // region 0's implicit task information (both exit and enter frames should be set) // CHECK: {{^}}[[MASTER_ID]]: task level 2: parallel_id=[[PARALLEL_ID_0]], task_id=[[TASK_ID_0]] // CHECK-SAME: exit_frame=[[REGION_0_FRAME_EXIT]] // CHECK-SAME: reenter_frame=[[REGION_0_FRAME_ENTER]] // CHECK-SAME: task_type=ompt_task_implicit // region 0's initial task information (both exit and enter frames should be set) // CHECK: {{^}}[[MASTER_ID]]: task level 3: parallel_id=[[INITIAL_PARALLEL_ID]], task_id=[[INITIAL_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]] // CHECK-SAME: reenter_frame=[[INITIAL_TASK_FRAME_ENTER]] // CHECK-SAME: task_type=ompt_task_initial return 0; }
svd.c
/* Copyright (c) 2014-2016, Intel Corporation All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <pymic_kernel.h> #include <stdlib.h> #include <stdio.h> #include <mkl.h> PYMIC_KERNEL void empty(int argc, uintptr_t argptr[], size_t sizes[]) { #pragma omp parallel { // do nothing } } PYMIC_KERNEL void dgemm_kernel(const double *A, const int64_t *trA, const double *B, const int64_t *trB, double *C, const int64_t *m, const int64_t *n, const int64_t *k, const double *alpha, const double *beta) { int i; int lda, ldb; int transA, transB; /* check if we need to transpose A */ lda = *k; transA = CblasNoTrans; if (*trA) { lda = *m; transA = CblasTrans; } /* check if we need to transpose B */ ldb = *n; transB = CblasNoTrans; if (*trB) { ldb = *k; transB = CblasTrans; } cblas_dgemm(CblasRowMajor, transA, transB, (int) *m, (int) *n, (int) *k, *alpha, A, lda, B, ldb, *beta, C, (int) *n); }
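dgemm_kernel's lda/ldb selection follows the CBLAS row-major rule: the leading dimension is the stored row length of the operand, so it is k for an untransposed m x k matrix A but m once *trA requests a transpose, and likewise n versus k for B. A host-side sketch calling cblas_dgemm directly with the same convention (any CBLAS works; MKL's header is used here to match the file):

/* dgemm_demo.c -- row-major leading-dimension convention from dgemm_kernel. */
#include <stdio.h>
#include <mkl.h>   /* or cblas.h with another CBLAS implementation */

int main(void)
{
    /* C(2x2) = A(2x3) * B(3x2), all row-major, no transposes:
       lda = k = 3, ldb = n = 2, ldc = n = 2 -- the same choices
       dgemm_kernel makes when *trA == *trB == 0. */
    double A[6] = {1, 2, 3,
                   4, 5, 6};
    double B[6] = {1, 0,
                   0, 1,
                   1, 1};
    double C[4] = {0};
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                2, 2, 3, 1.0, A, 3, B, 2, 0.0, C, 2);
    printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]);   /* 4 5 / 10 11 */
    return 0;
}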
transform_functions_fp32.h
// Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#ifndef CHEETAH_X86_TRANSFORM_FUNTIONS_FP32_H
#define CHEETAH_X86_TRANSFORM_FUNTIONS_FP32_H

#include "thread_affinity.h"

template <U32 C, U32 N>
inline void transformNCHWCxNx(U32 fc, U32 fh, U32 fw, U32 oc, const F32 *input, F32 *output)
{
    F32 *dest = nullptr;
    const F32 *src;
    U32 cSize = 0, cSizePadding = 0;
    U32 lstep = fc * fh * fw;
    __m256i vindex = _mm256_set_epi32(
        lstep * 7, lstep * 6, lstep * 5, lstep * 4, lstep * 3, lstep * 2, lstep, 0);
    for (U32 c = 0; c < fc; c += cSize) {
        cSize = UNI_MIN(fc - c, C);
        cSizePadding = UNI_MIN(oc - c, C);
        for (U32 hw = 0; hw < fh * fw; ++hw) {
            for (U32 c8 = 0; c8 < cSize; ++c8) {
                src = input + (c + c8) * fh * fw + hw;
                dest = output + c * fh * fw * N + hw * cSizePadding * N + c8 * N;
                if (N >= 8) {
                    _mm256_storeu_ps(dest, _mm256_i32gather_ps(src, vindex, 4));
                }
                if (N >= 16) {
                    _mm256_storeu_ps(dest + 8, _mm256_i32gather_ps(src + 8 * lstep, vindex, 4));
                }
                if (N >= 24) {
                    _mm256_storeu_ps(dest + 16, _mm256_i32gather_ps(src + 16 * lstep, vindex, 4));
                }
                if (N == 32) {
                    _mm256_storeu_ps(dest + 24, _mm256_i32gather_ps(src + 24 * lstep, vindex, 4));
                }
            }
            UNI_MEMSET(dest + N, 0, ((cSizePadding - cSize) * N * 4));
        }
    }
}

// N is 32/24
template <U32 C, U32 N>
inline EE transformNCHWToNCHWCxNx(
    TensorDesc inputDesc, const F32 *input, TensorDesc outputDesc, F32 *output)
{
    if (input == NULL || output == NULL) {
        CHECK_STATUS(NULL_POINTER);
    }
    DataType fdt, odt;
    DataFormat fdf, odf;
    U32 fn, fc, fh, fw;
    U32 on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &fdt, &fdf, &fn, &fc, &fh, &fw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 remain = fn % N;
    fn -= remain;
    for (U32 n = 0; n < fn; n += N) {
        transformNCHWCxNx<C, N>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * N;
        output += oc * fh * fw * N;
    }
    if (remain >= 24) {
        transformNCHWCxNx<C, 24>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 24;
        output += oc * fh * fw * 24;
        remain -= 24;
    }
    if (remain >= 16) {
        transformNCHWCxNx<C, 16>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 16;
        output += oc * fh * fw * 16;
        remain -= 16;
    }
    if (remain >= 8) {
        transformNCHWCxNx<C, 8>(fc, fh, fw, oc, input, output);
        input += fc * fh * fw * 8;
        output += oc * fh * fw * 8;
        remain -= 8;
    }
    if (remain > 0) {
        F32 *dest = NULL;
        U32 cSize = 0, cSizePadding = 0;
        F32 m[8] = {0.0f};
        for (U32 i = 0; i < remain; ++i) {
            m[i] = -1.0f;
        }
        __m256 mask = _mm256_set_ps(m[7], m[6], m[5], m[4], m[3], m[2], m[1], m[0]);
        U32 lstep = fc * fh * fw;
        __m256i vindex = _mm256_set_epi32(
            lstep * 7, lstep * 6, lstep * 5, lstep * 4, lstep * 3, lstep * 2, lstep, 0);
        __m256 src256 = _mm256_setzero_ps();
        for (U32 c = 0; c < fc; c += cSize) {
            cSize = UNI_MIN(fc - c, C);
            cSizePadding = UNI_MIN(oc - c, C);
            for (U32 hw = 0; hw < fh * fw; ++hw) {
                for (U32 c8 = 0; c8 < cSize; ++c8) {
                    const F32 *src = input + (c + c8) * fh * fw + hw;
                    dest = output + c * fh * fw * 8 + hw * cSizePadding * 8 + c8 * 8;
                    _mm256_storeu_ps(dest, _mm256_mask_i32gather_ps(src256, src, vindex, mask, 4));
                }
                UNI_MEMSET(dest + 8, 0, ((cSizePadding - cSize) * 32));
            }
        }
        fn += remain;
    }
    return SUCCESS;
}

inline void PaddingNCHWC8(
    F32 *data, F32 *tmp, TensorDesc inputDesc, ConvolutionParamSpec convParamSpec)
{
    // NCHWC8
    DataType idt;
    DataFormat idf;
    U32 in, ic, ih, iw;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    U32 paddingT = convParamSpec.pad_top;
    U32 paddingB = convParamSpec.pad_bottom;
    U32 paddingL = convParamSpec.pad_left;
    U32 paddingR = convParamSpec.pad_right;
    U32 padih = paddingT + paddingB + ih;
    U32 padiw = paddingL + paddingR + iw;
    CHECK_REQUIREMENT((idf == DF_NCHWC8) && (ic % 8 == 0));
    ic /= 8;
#ifdef _USE_OPENMP
#pragma omp parallel num_threads(OMP_NUM_THREADS)
    {
#endif
#ifdef _USE_OPENMP
#pragma omp for schedule(static)
#endif
        for (U32 c = 0; c < ic; ++c) {
            U32 coff = c * padih * padiw * 8;
            UNI_MEMSET(tmp + coff, 0, padiw * paddingT * 8 * bytesOf(idt));
            UNI_MEMSET(
                tmp + coff + (ih + paddingT) * padiw * 8, 0, padiw * paddingB * 8 * bytesOf(idt));
        }
#ifdef _USE_OPENMP
#pragma omp for schedule(static)
#endif
        for (U32 hc = 0; hc < ih * ic; ++hc) {
            U32 c = hc / ih;
            U32 coff = c * padih * padiw * 8;
            U32 h = hc % ih;
            U32 hoff = (h + paddingT) * padiw;
            UNI_MEMSET(tmp + coff + hoff * 8, 0, paddingL * 8 * bytesOf(idt));
            UNI_MEMCPY(tmp + coff + (hoff + paddingL) * 8, data + c * ih * iw * 8 + h * iw * 8,
                iw * 8 * bytesOf(idt));
            UNI_MEMSET(tmp + coff + (hoff + (paddingL + iw)) * 8, 0, paddingR * 8 * bytesOf(idt));
        }
#ifdef _USE_OPENMP
    }
#endif
}

inline void deconvOverlapAndCrop(F32 *input, F32 *output, TensorDesc inputDesc,
    TensorDesc outputDesc, ConvolutionParamSpec convParamSpec)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    U32 fhfw = fh * fw;
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.pad_top;
    U32 paddingL = convParamSpec.pad_left;
    __m256i vindex =
        _mm256_set_epi32(fhfw * 7, fhfw * 6, fhfw * 5, fhfw * 4, fhfw * 3, fhfw * 2, fhfw, 0);
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kh = 0; kh < ih; ++kh) {
            for (U32 kw = 0; kw < iw; ++kw) {
                for (U32 kc = 0; kc < oc; kc += 8) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((kh * iw + kw) * oc + kc) * fhfw + jh * fw + jw;
                            __m256 x = _mm256_i32gather_ps(input + iidx, vindex, 4);
                            x = _mm256_add_ps(x, _mm256_loadu_ps(output + oidx));
                            _mm256_storeu_ps(output + oidx, x);
                        }
                    }
                }
            }
        }
        input += oc * fh * fw * ih * iw;
        output += oc * oh * ow;
    }
}

inline void deconvOverlapAndCropNCHWC8(F32 *input, F32 *output, TensorDesc inputDesc,
    TensorDesc outputDesc, ConvolutionParamSpec convParamSpec)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    U32 fhfw = fh * fw;
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.pad_top;
    U32 paddingL = convParamSpec.pad_left;
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kh = 0; kh < ih; ++kh) {
            for (U32 kw = 0; kw < iw; ++kw) {
                for (U32 kc = 0; kc < oc; kc += 8) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((jh * fw + jw) * oc + kc) * ih * iw + kh * iw * 8 + kw * 8;
                            _mm256_storeu_ps(output + oidx,
                                _mm256_add_ps(
                                    _mm256_loadu_ps(input + iidx), _mm256_loadu_ps(output + oidx)));
                        }
                    }
                }
            }
        }
        input += oc * fh * fw * ih * iw;
        output += oc * oh * ow;
    }
}

inline void deconvOverlapAndCropEqualNCHWC8(F32 *input, F32 *output, const F32 *bias,
    TensorDesc inputDesc, TensorDesc outputDesc, ConvolutionParamSpec convParamSpec)
{
    DataType idt, odt;
    DataFormat idf, odf;
    U32 in, ic, ih, iw, on, oc, oh, ow;
    CHECK_STATUS(tensor4dGet(inputDesc, &idt, &idf, &in, &ic, &ih, &iw));
    CHECK_STATUS(tensor4dGet(outputDesc, &odt, &odf, &on, &oc, &oh, &ow));
    U32 fh = convParamSpec.kernel_h;
    U32 fw = convParamSpec.kernel_w;
    U32 fhfw = fh * fw;
    U32 strideH = convParamSpec.stride_h;
    U32 strideW = convParamSpec.stride_w;
    U32 paddingT = convParamSpec.pad_top;
    U32 paddingL = convParamSpec.pad_left;
    for (U32 kn = 0; kn < in; ++kn) {
        for (U32 kc = 0; kc < oc; kc += 8) {
#ifdef _USE_OPENMP
#pragma omp parallel for num_threads(OMP_NUM_THREADS) schedule(static)
#endif
            for (U32 kh = 0; kh < ih; ++kh) {
                for (U32 kw = 0; kw < iw; ++kw) {
                    for (U32 jh = 0; jh < fh; ++jh) {
                        for (U32 jw = 0; jw < fw; ++jw) {
                            U32 ohIdx = kh * strideH + jh;
                            U32 owIdx = kw * strideW + jw;
                            if ((ohIdx < paddingT) || (ohIdx >= oh + paddingT) ||
                                (owIdx < paddingL) || (owIdx >= ow + paddingL)) {
                                continue;
                            }
                            ohIdx -= paddingT;
                            owIdx -= paddingL;
                            U32 oidx = (kc * oh + ohIdx * 8) * ow + owIdx * 8;
                            U32 iidx = ((jh * fw + jw) * oc + kc) * ih * iw + kh * iw * 8 + kw * 8;
                            _mm256_storeu_ps(output + oidx, _mm256_loadu_ps(input + iidx));
                        }
                    }
                }
            }
        }
        input += oc * fh * fw * ih * iw;
        output += oc * oh * ow;
    }
}
#endif
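/*
 * Aside (a reference sketch, not part of the header above): the gathers in
 * transformNCHWCxNx use vindex = {0, lstep, ..., 7*lstep} with a scale of
 * 4 bytes, i.e. they collect the same (channel, h, w) weight from eight
 * consecutive filters lying lstep = fc*fh*fw floats apart in NCHW layout.
 * A scalar equivalent of one such gather; gather8_scalar is a made-up name.
 */
#include <stddef.h>

static inline void gather8_scalar(const float *src, size_t lstep, float *dst)
{
    for (int i = 0; i < 8; ++i) {
        dst[i] = src[(size_t)i * lstep]; /* byte offset i*lstep*4, as in the intrinsic */
    }
}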
composite.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%        CCCC   OOO   M   M  PPPP    OOO   SSSSS  IIIII  TTTTT  EEEEE         %
%       C      O   O  MM MM  P   P  O   O  SS       I      T    E             %
%       C      O   O  M M M  PPPP   O   O   SSS     I      T    EEE           %
%       C      O   O  M   M  P      O   O     SS    I      T    E             %
%        CCCC   OOO   M   M  P       OOO   SSSSS  IIIII    T    EEEEE         %
%                                                                             %
%                                                                             %
%                    MagickCore Image Composite Methods                      %
%                                                                             %
%                              Software Design                               %
%                                   Cristy                                   %
%                                 July 1992                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization     %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may %
%  obtain a copy of the License at                                           %
%                                                                             %
%    https://imagemagick.org/script/license.php                              %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software       %
%  distributed under the License is distributed on an "AS IS" BASIS,         %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  %
%  See the License for the specific language governing permissions and       %
%  limitations under the License.                                            %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resample.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p o s i t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompositeImage() returns the second image composited onto the first
%  at the specified offset, using the specified composite method.
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const Image *source_image,const CompositeOperator compose,
%        const MagickBooleanType clip_to_self,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition.
%
%    o source_image: the source image.
%
%    o compose: this operator affects how the composite is applied to the
%      image.  The operators and how they are utilized are listed here:
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o clip_to_self: set to MagickTrue to limit composition to the area
%      composed.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra controls from image meta-data in 'image' (artifacts):
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Composition based on the SVG specification.  A composition is defined
  by...

    Color Function  :  f(Sc,Dc)  where Sc and Dc are the normalized colors
    Blending areas  :  X = 1     for area of overlap, i.e.: f(Sc,Dc)
                       Y = 1     for source preserved
                       Z = 1     for canvas preserved

  Conversion to transparency (then optimized):

    Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
    Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

  Where...

    Sca = Sc*Sa     normalized Source color multiplied by Source alpha
    Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
    Dc' = Dca'/Da'  the desired color value for this channel.

  Da' appears in the formulas below as 'gamma', the resulting alpha value.

  Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
  the following optimizations...

    gamma = Sa+Da-Sa*Da;
    gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
    opacity = QuantumScale*alpha*beta;  // over blend, optimized 1-Gamma

  The above SVG definitions also define that mathematical composition
  methods should use an 'Over' blending mode for the alpha channel.  It
  however was not applied for the composition modes 'Plus', 'Minus', and
  the modulus versions of 'Add' and 'Subtract'.

  Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification with
       regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates) is
       turned off (enabled by default), then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));
  r=0.0;
  g=0.0;
  b=0.0;
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else if ((1.0 <= h) && (h < 2.0))
    {
      r=x;
      g=c;
    }
  else if ((2.0 <= h) && (h < 3.0))
    {
      g=c;
      b=x;
    }
  else if ((3.0 <= h) && (h < 4.0))
    {
      g=x;
      b=c;
    }
  else if ((4.0 <= h) && (h < 5.0))
    {
      r=x;
      b=c;
    }
  else if ((5.0 <= h) && (h < 6.0))
    {
      r=c;
      b=x;
    }
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
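    Note (a summary of the code that follows): with max = max(r,g,b) and
    min = min(r,g,b), this computes

      chroma = max - min
      hue    = piecewise by which channel is the maximum, e.g.
               fmod((g-b)/chroma+6.0,6.0)/6.0 when red is the maximum
      luma   = 0.298839*r + 0.586811*g + 0.114350*b  (Rec. 601-style weights)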
*/ assert(hue != (MagickRealType *) NULL); assert(chroma != (MagickRealType *) NULL); assert(luma != (MagickRealType *) NULL); r=red; g=green; b=blue; max=MagickMax(r,MagickMax(g,b)); c=max-(MagickRealType) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == max) h=fmod((g-b)/c+6.0,6.0); else if (green == max) h=((b-r)/c)+2.0; else if (blue == max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static MagickBooleanType CompositeOverImage(Image *image, const Image *source_image,const MagickBooleanType clip_to_self, const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *image_view, *source_view; const char *value; MagickBooleanType clamp, status; MagickOffsetType progress; ssize_t y; /* Composite image. */ status=MagickTrue; progress=0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; PixelInfo canvas_pixel, source_pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. */ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. */ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
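    Note (a summary of the statements that follow): this loop implements the
    Porter-Duff 'over' operator in premultiplied form,

      alpha = Sa + Da - Sa*Da       (composed alpha)
      Dca'  = Sca + Dca*(1 - Sa)    (composed premultiplied color)
      Dc'   = Dca'/alpha            (recovered via gamma = 1/alpha)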
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); alpha=Sa+Da-Sa*Da; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ pixel=QuantumRange*alpha; q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Sc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; gamma=PerceptibleReciprocal(alpha); pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType CompositeImage(Image *image, const Image *composite,const CompositeOperator compose, const MagickBooleanType clip_to_self,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define CompositeImageTag "Composite/Image" CacheView *source_view, *image_view; const char *value; GeometryInfo geometry_info; Image *canvas_image, *source_image; MagickBooleanType clamp, status; MagickOffsetType progress; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; 
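  /*
    Illustrative note (based on the parsing below, not additional behavior):
    the defaults above are overridden per operator from the "compose:args"
    artifact.  For example, under MathematicsCompositeOp "A,B,C,D" selects
    f(Sc,Dc)=A*Sc*Dc+B*Sc+C*Dc+D, and under DissolveCompositeOp "50x80"
    yields source_dissolve=0.5 and canvas_dissolve=0.8.
  */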
threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; if ((source_image->alpha_trait == UndefinedPixelTrait) && (image->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlphaChannel(source_image,OpaqueAlphaChannel,exception); status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *p; Quantum *q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { ssize_t i; if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(source_image); i++) { PixelChannel channel = GetPixelChannelChannel(source_image,i); PixelTrait source_traits = GetPixelChannelTraits(source_image, channel); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((source_traits == UndefinedPixelTrait) || (traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *p; Quantum *q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? 
ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CompositeImageTag,(MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; double angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=2.0*geometry_info.rho; height=width; if ((flags & HeightValue) != 0) height=2.0*geometry_info.sigma; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; if ((flags & XValue) != 0 ) { MagickRealType angle; /* Rotate vectors if a rotation angle is given. */ angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { /* Lets set a angle range and calculate in the loop. */ angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, restore them afterwards. */ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* Perform the variable blurring of each pixel in image. 
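    Note (describing the loop below): each overlay pixel scales the EWA
    ellipse axes before resampling; red scales the x axis, green the y axis,
    and, when an angle range was given, blue selects the rotation angle
    passed to ScaleResampleFilter().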
*/ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); status=InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); if (status == MagickFalse) break; /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } if (x < (ssize_t) source_image->columns) break; sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. 
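    Note (a summary of the parsing below): rho is the source percent and
    sigma the canvas percent.  A rho above 100 saturates the source at 1.0
    and starts fading the canvas (canvas_dissolve = 2.0 - source_dissolve),
    unless sigma overrides the canvas factor.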
*/ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); if (flags == NoValue) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidGeometry","`%s'",value); } break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
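    Note: when that happens the loop falls back to GetOneVirtualPixel(), so
    the source contribution comes from the source image's virtual pixel
    method instead of the cached row.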
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*(ssize_t) GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, DcaDa, Sa, SaSca, Sc, Sca; ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. */ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
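    Note (previewing the switch below): the composed alpha is operator
    dependent, e.g.

      over: Sa + Da - Sa*Da    in:  Sa*Da
      out:  Sa*(1 - Da)        xor: Sa + Da - 2*Sa*Da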
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case FreezeCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case InterpolateCompositeOp: case LightenCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case MultiplyCompositeOp: case NegateCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ReflectCompositeOp: case ScreenCompositeOp: case SoftBurnCompositeOp: case SoftDodgeCompositeOp: case SoftLightCompositeOp: case StampCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } case ModulusAddCompositeOp: { if ((Sa+Da) <= 1.0) { alpha=(Sa+Da); break; } alpha=((Sa+Da)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sa-Da) >= 0.0) { alpha=(Sa-Da); break; } alpha=((Sa-Da)+1.0); break; } default: { alpha=1.0; break; } } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits = GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((channel == AlphaPixelChannel) && ((traits & UpdatePixelTrait) != 0)) { /* Set alpha channel. 
*/ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { if (IsFuzzyEquivalencePixel(source_image,p,image,q) != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=QuantumRange*Da; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case RMSECompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case BlurCompositeOp: case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case DifferenceCompositeOp: { pixel=QuantumRange*fabs((double) (Sa-Da)); break; } case FreezeCompositeOp: { pixel=QuantumRange*(1.0-(1.0-Sa)*(1.0-Sa)* PerceptibleReciprocal(Da)); if (pixel < 0.0) pixel=0.0; break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sa)-0.25* cos(MagickPI*Da)); break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } case MultiplyCompositeOp: { pixel=QuantumRange*Sa*Da; break; } case NegateCompositeOp: { pixel=QuantumRange*((1.0-Sa-Da)); break; } case ReflectCompositeOp: { pixel=QuantumRange*(Sa*Sa*PerceptibleReciprocal(1.0-Da)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case StampCompositeOp: { pixel=QuantumRange*(Sa+Da*Da-1.0); break; } case StereoCompositeOp: { pixel=QuantumRange*(Sa+Da)/2; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } if (source_traits == UndefinedPixelTrait) continue; /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=ClampToQuantum(Dc); continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. 
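    Note: every operator below is expressed in this premultiplied form and
    then unpremultiplied by gamma = PerceptibleReciprocal(alpha) before
    clamping; for example, multiply becomes

      Dca' = Sca*Dca + Sca*(1-Da) + Dca*(1-Sa)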
*/ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; SaSca=Sa*PerceptibleReciprocal(Sca); DcaDa=Dca*PerceptibleReciprocal(Da); switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-DcaDa)* SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) GetPixelBlack(source_image,p); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*SaSca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*gamma*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case FreezeCompositeOp: { pixel=QuantumRange*gamma*(1.0-(1.0-Sca)*(1.0-Sca)* PerceptibleReciprocal(Dca)); if (pixel < 0.0) pixel=0.0; break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case InterpolateCompositeOp: { pixel=QuantumRange*(0.5-0.25*cos(MagickPI*Sca)-0.25* cos(MagickPI*Dca)); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { if ((Sca+Dca) <= 1.0) { pixel=QuantumRange*(Sca+Dca); break; } pixel=QuantumRange*((Sca+Dca)-1.0); break; } case ModulusSubtractCompositeOp: { if ((Sca-Dca) >= 0.0) { pixel=QuantumRange*(Sca-Dca); break; } pixel=QuantumRange*((Sca-Dca)+1.0); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case NegateCompositeOp: { pixel=QuantumRange*(1.0-fabs(1.0-Sca-Dca)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*Sca; break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 
2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case ReflectCompositeOp: { pixel=QuantumRange*gamma*(Sca*Sca*PerceptibleReciprocal(1.0-Dca)); if (pixel > QuantumRange) pixel=QuantumRange; break; } case RMSECompositeOp: { double gray; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } gray=sqrt( (canvas_pixel.red-source_pixel.red)* (canvas_pixel.red-source_pixel.red)+ (canvas_pixel.green-source_pixel.green)* (canvas_pixel.green-source_pixel.green)+ (canvas_pixel.blue-source_pixel.blue)* (canvas_pixel.blue-source_pixel.blue)/3.0); switch (channel) { case RedPixelChannel: pixel=gray; break; case GreenPixelChannel: pixel=gray; break; case BluePixelChannel: pixel=gray; break; default: pixel=Dc; break; } break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftBurnCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Dca*PerceptibleReciprocal(1.0-Sca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Sca)* PerceptibleReciprocal(Dca)); break; } case SoftDodgeCompositeOp: { if ((Sca+Dca) < 1.0) pixel=QuantumRange*gamma*(0.5*Sca*PerceptibleReciprocal(1.0-Dca)); else pixel=QuantumRange*gamma*(1.0-0.5*(1.0-Dca)* PerceptibleReciprocal(Sca)); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-DcaDa))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*DcaDa* (4.0*DcaDa+1.0)*(DcaDa-1.0)+7.0*DcaDa)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow(DcaDa,0.5)- DcaDa)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case StampCompositeOp: { pixel=QuantumRange*(Sca+Dca*Dca-1.0); break; } case StereoCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 
1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)* PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa*PerceptibleReciprocal(2.0* (Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o texture_image: This image is the texture to layer on the background. % */ MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture, ExceptionInfo *exception) { #define TextureImageTag "Texture/Image" CacheView *image_view, *texture_view; Image *texture_image; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (texture == (const Image *) NULL) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); texture_image=CloneImage(texture,0,0,MagickTrue,exception); if (texture_image == (const Image *) NULL) return(MagickFalse); (void) TransformImageColorspace(texture_image,image->colorspace,exception); (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod, exception); status=MagickTrue; if ((image->compose != CopyCompositeOp) && ((image->compose != OverCompositeOp) || (image->alpha_trait != UndefinedPixelTrait) || (texture_image->alpha_trait != UndefinedPixelTrait))) { /* Tile texture onto the image background. 
*/ for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows) { ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { MagickBooleanType thread_status; thread_status=CompositeImage(image,texture_image,image->compose, MagickTrue,x+texture_image->tile_offset.x,y+ texture_image->tile_offset.y,exception); if (thread_status == MagickFalse) { status=thread_status; break; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType) image->rows,image->rows); texture_image=DestroyImage(texture_image); return(status); } /* Tile texture onto the image background (optimized). */ status=MagickTrue; texture_view=AcquireVirtualCacheView(texture_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(texture_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *p, *pixels; ssize_t x; Quantum *q; size_t width; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x, (y+texture_image->tile_offset.y) % texture_image->rows, texture_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns) { ssize_t j; p=pixels; width=texture_image->columns; if ((x+(ssize_t) width) > (ssize_t) image->columns) width=image->columns-x; for (j=0; j < (ssize_t) width; j++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++) { PixelChannel channel = GetPixelChannelChannel(texture_image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait texture_traits=GetPixelChannelTraits(texture_image, channel); if ((traits == UndefinedPixelTrait) || (texture_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(texture_image); q+=GetPixelChannels(image); } } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } texture_view=DestroyCacheView(texture_view); image_view=DestroyCacheView(image_view); texture_image=DestroyImage(texture_image); return(status); }
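
/*
  A minimal standalone sketch (not ImageMagick API) of the SVG/Porter-Duff
  pattern that every per-channel case above instantiates:
  Dca' = f(Sca,Dca) + Sca*(1-Da) + Dca*(1-Sa), with gamma un-premultiplying
  the result. PixelRGBA and CompositeOverPixel are illustrative names;
  components are normalized to [0,1].
*/
#include <stdio.h>

typedef struct
{
  double
    red,
    green,
    blue,
    alpha;
} PixelRGBA;

static PixelRGBA CompositeOverPixel(const PixelRGBA source,
  const PixelRGBA canvas)
{
  PixelRGBA
    composite;

  double
    Sa = source.alpha,
    Da = canvas.alpha,
    alpha = Sa+Da*(1.0-Sa),                    /* Da' = Sa + Da*(1-Sa) */
    gamma = (alpha <= 0.0) ? 0.0 : 1.0/alpha;  /* un-premultiply */

  composite.red   = gamma*(Sa*source.red  +Da*canvas.red  *(1.0-Sa));
  composite.green = gamma*(Sa*source.green+Da*canvas.green*(1.0-Sa));
  composite.blue  = gamma*(Sa*source.blue +Da*canvas.blue *(1.0-Sa));
  composite.alpha = alpha;
  return(composite);
}

int main(void)
{
  PixelRGBA red  = { 1.0, 0.0, 0.0, 0.5 };  /* half-opaque red source */
  PixelRGBA blue = { 0.0, 0.0, 1.0, 1.0 };  /* opaque blue canvas */
  PixelRGBA c = CompositeOverPixel(red,blue);
  (void) printf("over -> r=%.3f g=%.3f b=%.3f a=%.3f\n",
    c.red,c.green,c.blue,c.alpha);
  return(0);
}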
triplet_kpoint.c
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only developed */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdlib.h> #include <mathfunc.h> #include <kpoint.h> #include <kgrid.h> #include <triplet_h/triplet.h> #include <triplet_h/triplet_kpoint.h> #define KPT_NUM_BZ_SEARCH_SPACE 125 static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1}, { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1}, { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1}, { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1}, { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1}, { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1}, { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1}, { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1}, { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1}, { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1}, { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1}, { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1}, { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1}, { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1}, { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static void grid_point_to_address_double(int address_double[3], const int grid_point, const int mesh[3], const int is_shift[3]); static int get_ir_triplets_at_q(int map_triplets[], int map_q[], int grid_address[][3], const int grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable); static int get_BZ_triplets_at_q(int triplets[][3], const int grid_point, TPLCONST int bz_grid_address[][3], const int bz_map[], const int map_triplets[], const int num_map_triplets, const int mesh[3]); static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const int bz_map[], const int mesh[3], const int bzmesh[3]); static void modulo_i3(int v[3], const int m[3]); int tpk_get_ir_triplets_at_q(int map_triplets[], int map_q[], int grid_address[][3], const int grid_point, const int mesh[3], const int is_time_reversal, const MatINT * rotations, const int swappable) { int num_ir; MatINT *rot_reciprocal; rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal); num_ir = get_ir_triplets_at_q(map_triplets, map_q, grid_address, grid_point, mesh, rot_reciprocal, swappable); mat_free_MatINT(rot_reciprocal); return num_ir; } int tpk_get_BZ_triplets_at_q(int triplets[][3], const int grid_point, TPLCONST int bz_grid_address[][3], const int bz_map[], const int map_triplets[], const int num_map_triplets, const int mesh[3]) { return get_BZ_triplets_at_q(triplets, grid_point, bz_grid_address, bz_map, map_triplets, num_map_triplets, mesh); } static int get_ir_triplets_at_q(int map_triplets[], int map_q[], int grid_address[][3], const int grid_point, const int mesh[3], const MatINT * rot_reciprocal, const int swappable) { int i, j, num_grid, 
q_2, num_ir_q, num_ir_triplets, ir_grid_point; int mesh_double[3], is_shift[3]; int address_double0[3], address_double1[3], address_double2[3]; int *ir_grid_points, *third_q; double tolerance; double stabilizer_q[1][3]; MatINT *rot_reciprocal_q; tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); num_grid = mesh[0] * mesh[1] * mesh[2]; for (i = 0; i < 3; i++) { /* Only consider the gamma-point */ is_shift[i] = 0; mesh_double[i] = mesh[i] * 2; } /* Search irreducible q-points (map_q) with a stabilizer */ /* q */ grid_point_to_address_double(address_double0, grid_point, mesh, is_shift); for (i = 0; i < 3; i++) { stabilizer_q[0][i] = (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]); } rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, 1, stabilizer_q); num_ir_q = kpt_get_irreducible_reciprocal_mesh(grid_address, map_q, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); third_q = (int*) malloc(sizeof(int) * num_ir_q); ir_grid_points = (int*) malloc(sizeof(int) * num_ir_q); num_ir_q = 0; for (i = 0; i < num_grid; i++) { if (map_q[i] == i) { ir_grid_points[num_ir_q] = i; num_ir_q++; } map_triplets[i] = -1; } #pragma omp parallel for private(j, address_double1, address_double2) for (i = 0; i < num_ir_q; i++) { grid_point_to_address_double(address_double1, ir_grid_points[i], mesh, is_shift); /* q' */ for (j = 0; j < 3; j++) { /* q'' */ address_double2[j] = - address_double0[j] - address_double1[j]; } third_q[i] = kgd_get_grid_point_double_mesh(address_double2, mesh); } num_ir_triplets = 0; if (swappable) { /* search q1 <-> q2 */ for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; q_2 = third_q[i]; if (map_triplets[map_q[q_2]] > -1) { map_triplets[ir_grid_point] = map_q[q_2]; } else { map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } } else { for (i = 0; i < num_ir_q; i++) { ir_grid_point = ir_grid_points[i]; map_triplets[ir_grid_point] = ir_grid_point; num_ir_triplets++; } } #pragma omp parallel for for (i = 0; i < num_grid; i++) { map_triplets[i] = map_triplets[map_q[i]]; } free(third_q); third_q = NULL; free(ir_grid_points); ir_grid_points = NULL; return num_ir_triplets; } static int get_BZ_triplets_at_q(int triplets[][3], const int grid_point, TPLCONST int bz_grid_address[][3], const int bz_map[], const int map_triplets[], const int num_map_triplets, const int mesh[3]) { int i, j, k, num_ir; int bz_address[3][3], bz_address_double[3], bzmesh[3]; int *ir_grid_points; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } num_ir = 0; ir_grid_points = (int*) malloc(sizeof(int) * num_map_triplets); for (i = 0; i < num_map_triplets; i++) { if (map_triplets[i] == i) { ir_grid_points[num_ir] = i; num_ir++; } } #pragma omp parallel for private(j, k, bz_address, bz_address_double) for (i = 0; i < num_ir; i++) { for (j = 0; j < 3; j++) { bz_address[0][j] = bz_grid_address[grid_point][j]; bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j]; bz_address[2][j] = - bz_address[0][j] - bz_address[1][j]; } for (j = 2; j > -1; j--) { if (get_third_q_of_triplets_at_q(bz_address, j, bz_map, mesh, bzmesh) == 0) { break; } } for (j = 0; j < 3; j++) { for (k = 0; k < 3; k++) { bz_address_double[k] = bz_address[j][k] * 2; } triplets[i][j] = bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)]; } } free(ir_grid_points); return num_ir; } static int get_third_q_of_triplets_at_q(int bz_address[3][3], const int q_index, const int bz_map[], const int mesh[3], const int bzmesh[3]) { int i, j, 
smallest_g, smallest_index, sum_g, delta_g[3]; int bzgp[KPT_NUM_BZ_SEARCH_SPACE], bz_address_double[3]; modulo_i3(bz_address[q_index], mesh); for (i = 0; i < 3; i++) { delta_g[i] = 0; for (j = 0; j < 3; j++) { delta_g[i] += bz_address[j][i]; } delta_g[i] /= mesh[i]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { for (j = 0; j < 3; j++) { bz_address_double[j] = (bz_address[q_index][j] + bz_search_space[i][j] * mesh[j]) * 2; } bzgp[i] = bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)]; } for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] != -1) { goto escape; } } escape: smallest_g = 4; smallest_index = 0; for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) { if (bzgp[i] > -1) { /* q'' is in BZ */ sum_g = (abs(delta_g[0] + bz_search_space[i][0]) + abs(delta_g[1] + bz_search_space[i][1]) + abs(delta_g[2] + bz_search_space[i][2])); if (sum_g < smallest_g) { smallest_index = i; smallest_g = sum_g; } } } for (i = 0; i < 3; i++) { bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i]; } return smallest_g; } static void grid_point_to_address_double(int address_double[3], const int grid_point, const int mesh[3], const int is_shift[3]) { int i; int address[3]; #ifndef GRID_ORDER_XYZ address[2] = grid_point / (mesh[0] * mesh[1]); address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0]; address[0] = grid_point % mesh[0]; #else address[0] = grid_point / (mesh[1] * mesh[2]); address[1] = (grid_point - address[0] * mesh[1] * mesh[2]) / mesh[2]; address[2] = grid_point % mesh[2]; #endif for (i = 0; i < 3; i++) { address_double[i] = address[i] * 2 + is_shift[i]; } } static void modulo_i3(int v[3], const int m[3]) { int i; for (i = 0; i < 3; i++) { v[i] = v[i] % m[i]; if (v[i] < 0) { v[i] += m[i]; } } }
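
/* A self-contained sketch of the indexing convention used above, assuming
   the default (non-GRID_ORDER_XYZ) x-fastest layout. The real lookup,
   kgd_get_grid_point_double_mesh, lives in kgrid.h (not shown), so only
   the plain unshifted case is reconstructed here; wrap_address mirrors
   modulo_i3(). */
#include <stdio.h>

/* gp = z * (mx * my) + y * mx + x, i.e. x runs fastest. */
static int address_to_grid_point(const int address[3], const int mesh[3])
{
  return address[2] * mesh[0] * mesh[1] + address[1] * mesh[0] + address[0];
}

static void grid_point_to_address(int address[3],
                                  const int grid_point,
                                  const int mesh[3])
{
  address[2] = grid_point / (mesh[0] * mesh[1]);
  address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0];
  address[0] = grid_point % mesh[0];
}

/* Wrap a possibly negative address back into [0, mesh) on each axis. */
static void wrap_address(int address[3], const int mesh[3])
{
  int i;
  for (i = 0; i < 3; i++) {
    address[i] = address[i] % mesh[i];
    if (address[i] < 0) {
      address[i] += mesh[i];
    }
  }
}

int main(void)
{
  int gp, q2[3], back[3];
  int mesh[3] = {4, 4, 4};
  int q1[3] = {3, 1, 2};

  gp = address_to_grid_point(q1, mesh);
  grid_point_to_address(back, gp, mesh);
  /* q'' = -q - q' (here with q = 0) must wrap back onto the mesh. */
  q2[0] = -q1[0]; q2[1] = -q1[1]; q2[2] = -q1[2];
  wrap_address(q2, mesh);
  printf("gp=%d -> (%d,%d,%d); q''=(%d,%d,%d)\n",
         gp, back[0], back[1], back[2], q2[0], q2[1], q2[2]);
  return 0;
}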
sgetrs.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetrs.c, normal z -> s, Fri Sep 28 17:38:06 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * ******************************************************************************/ int plasma_sgetrs(int n, int nrhs, float *pA, int lda, int *ipiv, float *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } if (n < 0) { plasma_error("illegal value of n"); return -1; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -2; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -4; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -7; } // quick return if (imin(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_trsm(plasma, PlasmaRealFloat, n, n); // Set tiling parameters. int nb = plasma->nb; // Initialize barrier. plasma_barrier_init(&plasma->barrier); // Create tile matrix. plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_sge2desc(pA, lda, A, &sequence, &request); plasma_omp_sge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_sgetrs(A, ipiv, B, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request); } // Free matrix A in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * ******************************************************************************/ void plasma_omp_sgetrs(plasma_desc_t A, int *ipiv, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid B"); return; } if (sequence == NULL) { plasma_fatal_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_fatal_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_psgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request); plasma_pstrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit, 1.0, A, B, sequence, request); plasma_pstrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, 1.0, A, B, sequence, request); }
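
// A scalar sketch of what the geswp/trsm/trsm sequence above computes,
// assuming the LAPACK-style convention for the combined LU factor and the
// 1-based ipiv produced by the factorization. lu_solve is an illustrative
// name, not part of the PLASMA API.
#include <stdio.h>

// Solve A*x = b from a combined LU factor (unit-lower L and upper U packed
// in one n-by-n column-major array, as xGETRF stores it) plus pivots.
static void lu_solve(int n, const float *LU, const int *ipiv, float *b)
{
    // Apply the row interchanges recorded during factorization.
    for (int i = 0; i < n; i++) {
        int p = ipiv[i]-1;
        if (p != i) {
            float t = b[i]; b[i] = b[p]; b[p] = t;
        }
    }
    // Forward substitution with L (unit diagonal).
    for (int i = 0; i < n; i++)
        for (int j = 0; j < i; j++)
            b[i] -= LU[i + j*n]*b[j];
    // Back substitution with U.
    for (int i = n-1; i >= 0; i--) {
        for (int j = i+1; j < n; j++)
            b[i] -= LU[i + j*n]*b[j];
        b[i] /= LU[i + i*n];
    }
}

int main(void)
{
    // A = [[2,1],[4,3]]; pivoting row 2 first gives P*A = L*U with
    // L21 = 0.5 and U = [[4,3],[0,-0.5]], hence ipiv = {2,2}.
    float LU[4] = {4.0f, 0.5f, 3.0f, -0.5f};   // column-major
    int ipiv[2] = {2, 2};
    float b[2] = {3.0f, 7.0f};                 // A*(1,1)^T = (3,7)^T
    lu_solve(2, LU, ipiv, b);
    printf("x = (%g, %g)\n", b[0], b[1]);      // expect (1, 1)
    return 0;
}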
ompfor7.c
/*
 * test #define
 * Liao 12/1/2010
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* fallbacks so the test also builds without OpenMP */
static int omp_get_num_threads(void) { return 1; }
static int omp_get_thread_num(void) { return 0; }
#endif
#define P 4
void foo(int iend, int ist)
{
  int i = 0; /* was declared but read uninitialized below */
  i = i + P; /* exercises the P macro in ordinary code */
#pragma omp parallel
  {
#pragma omp single
    printf("Using %d threads.\n", omp_get_num_threads());
    /* P must also expand inside the schedule clause */
#pragma omp for nowait schedule(static,P)
    for (i = iend; i >= ist; i--) {
      printf("Iteration %d is carried out by thread %d\n", i,
             omp_get_thread_num());
    }
  }
}
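
/* A hypothetical driver for the test above (foo() has no caller in this
   file); link the two translation units together. With schedule(static,P)
   and P=4 on two threads, iterations 15..12 go to thread 0, 11..8 to
   thread 1, 7..4 to thread 0 again, and so on; nowait drops the implicit
   barrier at the end of the worksharing loop. */
#include <stdio.h>

extern void foo(int iend, int ist);

int main(void)
{
  foo(15, 0);
  return 0;
}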
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) 
              + coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ])
              + coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2])
              + coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ])
              + coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ])
              + coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3])
              + coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ])
              + coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ])
              + coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff); /* was lowercase min(), which is undefined; the macro is MIN */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
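
/* A 1-D sketch of the double buffering used above: folding the time
 * dimension to two planes via t%2 / (t+1)%2 keeps only two grids live
 * regardless of Nt. Both planes start identical so the boundary cells,
 * which the update never writes, remain valid when the read plane flips. */
#include <stdio.h>

#define N  16
#define NT 4

int main(void)
{
  double A[2][N];
  int t, i;

  for (i = 0; i < N; i++)
    A[0][i] = A[1][i] = 1.0 * i;

  for (t = 0; t < NT; t++)
    for (i = 1; i < N-1; i++)
      A[(t+1)%2][i] = 0.25 * A[(t)%2][i-1]
                    + 0.50 * A[(t)%2][i  ]
                    + 0.25 * A[(t)%2][i+1];

  printf("A[%d][%d] = %f\n", NT%2, N/2, A[NT%2][N/2]);
  return 0;
}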
GB_unop__minv_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint16_uint16) // op(A') function: GB (_unop_tran__minv_uint16_uint16) // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 16) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 16) ; // casting #define GB_CAST(z, aij) \ uint16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 16) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint16_uint16) ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 16) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 16) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint16_uint16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
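
// A hand-expanded sketch of the Generator/ pattern this file comes from:
// type and operator are pasted into one apply-kernel template, and the
// bitmap variant skips holes via A->b. GB_DECLARE_APPLY and NEGATE are
// illustrative names, not SuiteSparse macros; _Pragma is used because
// #pragma cannot appear inside a #define.
#include <stdio.h>
#include <stdint.h>

#define GB_DECLARE_APPLY(name, ctype, atype, OP)                        \
    static void name(ctype *Cx, const atype *Ax, const int8_t *Ab,      \
                     int64_t anz)                                       \
    {                                                                   \
        int64_t p ;                                                     \
        _Pragma("omp parallel for schedule(static)")                    \
        for (p = 0 ; p < anz ; p++)                                     \
        {                                                               \
            if (Ab != NULL && !Ab [p]) continue ;  /* bitmap hole */    \
            atype aij = Ax [p] ;                                        \
            Cx [p] = OP (aij) ;                                         \
        }                                                               \
    }

#define NEGATE(x) (-(x))
GB_DECLARE_APPLY (unop_apply_neg_int32, int32_t, int32_t, NEGATE)

int main (void)
{
    int32_t Ax [4] = {1, -2, 3, -4}, Cx [4] = {0, 0, 0, 0} ;
    int8_t  Ab [4] = {1, 0, 1, 1} ;   // entry 1 is a hole
    unop_apply_neg_int32 (Cx, Ax, Ab, 4) ;
    printf ("%d %d %d %d (slot 1 untouched)\n",
            (int) Cx [0], (int) Cx [1], (int) Cx [2], (int) Cx [3]) ;
    return (0) ;
}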
GB_unaryop__one_int32_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_int32_int32 // op(A') function: GB_tran__one_int32_int32 // C type: int32_t // A type: int32_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_int32_int32 ( int32_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_int32_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
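
// A sketch of the GB_DISABLE contract seen in both generated files above:
// when the specialized kernel is compiled out (a GxB_NO_* flag is set), it
// returns a no-value code and the caller falls back to a generic path.
// The enum and names below are stand-ins, not the GraphBLAS API.
#include <stdio.h>
#include <stdint.h>

typedef enum { GB_OK = 0, GB_NO_VALUE = 1 } gb_info ;

// GB_NO_ONE stands in for a GxB_NO_* control flag.
#ifdef GB_NO_ONE
#define GB_DISABLE 1
#else
#define GB_DISABLE 0
#endif

static gb_info unop_one_int32 (int32_t *Cx, int64_t anz)
{
#if GB_DISABLE
    return (GB_NO_VALUE) ;        // compiled out: caller must fall back
#else
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = 1 ;              // ONE ignores its input entirely
    }
    return (GB_OK) ;
#endif
}

static void generic_fill_one (int32_t *Cx, int64_t anz)
{
    int64_t p ;
    for (p = 0 ; p < anz ; p++) Cx [p] = 1 ;
}

int main (void)
{
    int32_t Cx [8] ;
    if (unop_one_int32 (Cx, 8) == GB_NO_VALUE)
    {
        generic_fill_one (Cx, 8) ;    // slower generic path
    }
    printf ("Cx[0]=%d Cx[7]=%d\n", (int) Cx [0], (int) Cx [7]) ;
    return (0) ;
}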
SparsifyCollapsingMex.c
#include "mex.h" #include <omp.h> #include <math.h> void splitMatrices(mwIndex startRow, mwIndex endRow, mwIndex* R_A0 , mwIndex *starts_A0, double *V_A0 , mwIndex *R_Aomega , mwIndex *starts_Aomega, double *V_Aomega); void Sparsify(mwIndex startRow, mwIndex endRow, mwIndex naux, mwIndex* R_A0 , mwIndex *starts_A0, double *V_A0 , mwIndex *R_Aomega , mwIndex *starts_Aomega, double *V_Aomega, mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0, mwIndex *R_R0P , mwIndex *starts_R0P , double *V_R0P, mwIndex *C_A0_cols, mwIndex *starts_A0_cols,double *V_A0_cols, mwIndex* globalDiagonal); void SparsifyAki(mwIndex k, mwIndex i, double Aki, mwIndex* R_A0 , mwIndex *starts_A0 , double *V_A0, mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0, mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P, mwIndex *C_A0_cols, mwIndex *starts_A0_cols, double *V_A0_cols, mwIndex *intI1I2 , mwIndex *auxI1 , mwIndex *auxI2, mwIndex *auxm1 , mwIndex *auxm2 , double* thetta, mwIndex* globalDiagonal ,mwIndex naux_list); int intersect(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, mwIndex startB, mwIndex endB, mwIndex* ans, mwIndex* iA, mwIndex* iB); // mwIndex setDiff(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, // mwIndex startB, mwIndex endB, mwIndex* ans, mwIndex* i); mwIndex findBinarySearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind); void GenerateDiagonalGlobalIndices(mwIndex startRow, mwIndex endRow, mwIndex* R_A0, mwIndex* starts_A0, mwIndex* globalDiagonal); void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //mex -O 'CXXOPTIMFLAGS=-DNDEBUG -O3' -largeArrayDims SparsifyCollapsingMex.c COMPFLAGS="$COMPFLAGS -openmp" LINKFALGS="$LINKFALGS -openmp" mwIndex id, Nthrds, istart, iend; mwIndex naux = 0; // // mwIndex *R_A0 = mxGetIr(prhs[0]); mwIndex *starts_A0 = mxGetJc(prhs[0]); double *V_A0 = mxGetPr(prhs[0]); //mwIndex nzmax_A0 = mxGetNzmax(prhs[0]); mwIndex n = mxGetN(prhs[0]); mwIndex *R_Aomega = mxGetIr(prhs[1]); mwIndex *starts_Aomega = mxGetJc(prhs[1]); double *V_Aomega = mxGetPr(prhs[1]); mwIndex nzmax_Aomega = mxGetNzmax(prhs[1]); mwIndex *R_RP0 = mxGetIr(prhs[2]); mwIndex *starts_RP0 = mxGetJc(prhs[2]); double *V_RP0 = mxGetPr(prhs[2]); mwIndex *C_R0P = mxGetIr(prhs[3]); mwIndex *starts_R0P = mxGetJc(prhs[3]); double *V_R0P = mxGetPr(prhs[3]); mwIndex *C_A0_cols = mxGetIr(prhs[4]); mwIndex *starts_A0_cols = mxGetJc(prhs[4]); double *V_A0_cols = mxGetPr(prhs[4]); mwIndex* globalDiagonal = (mwIndex*)malloc(n*sizeof(mwIndex)); #pragma omp parallel private(id, Nthrds, istart, iend) num_threads(omp_get_num_procs()) { //printf("%d",); id = omp_get_thread_num(); Nthrds = omp_get_num_threads(); istart = id * n / Nthrds; iend = (id+1) * n / Nthrds; if (id == Nthrds-1) iend = n; splitMatrices(istart, iend, R_A0, starts_A0, V_A0, R_Aomega, starts_Aomega, V_Aomega); #pragma omp barrier GenerateDiagonalGlobalIndices(0,n,R_A0,starts_A0,globalDiagonal); #pragma omp barrier Sparsify(istart,iend,naux, R_A0 , starts_A0, V_A0, R_Aomega, starts_Aomega, V_Aomega, R_RP0, starts_RP0, V_RP0, C_R0P, starts_R0P, V_R0P, C_A0_cols, starts_A0_cols, V_A0_cols,globalDiagonal); #pragma omp barrier } free(globalDiagonal); } void Sparsify(mwIndex startRow, mwIndex endRow, mwIndex naux, mwIndex* R_A0 , mwIndex *starts_A0, double *V_A0 , mwIndex *R_Aomega , mwIndex *starts_Aomega, double *V_Aomega, mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0, mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P, mwIndex *C_A0_cols, mwIndex *starts_A0_cols,double *V_A0_cols, mwIndex* 
globalDiagonal){ mwIndex k,i, global_idx, naux_list; mwIndex *intI1I2, *auxI1, *auxI2, *auxm1, *auxm2; double *thetta; double Aki; for (k = startRow ; k < endRow ;++k){ if ((starts_A0[k+1] - starts_A0[k])>naux){ naux = (starts_A0[k+1] - starts_A0[k]); } } for (k = startRow ; k < endRow ;++k){ if ((starts_A0_cols[k+1] - starts_A0_cols[k])>naux){ naux = (starts_A0_cols[k+1] - starts_A0_cols[k]); } } naux = naux + 5; naux_list = naux*naux; intI1I2 = (mwIndex*)malloc(naux*sizeof(mwIndex)); auxI1 = (mwIndex*)malloc(naux*sizeof(mwIndex)); auxI2 = (mwIndex*)malloc(naux*sizeof(mwIndex)); auxm1 = (mwIndex*)malloc(naux_list*sizeof(mwIndex)); auxm2 = (mwIndex*)malloc(naux_list*sizeof(mwIndex)); thetta = (double*)malloc(naux_list*sizeof(double)); for (k = startRow ; k < endRow ;++k){ for (global_idx = starts_Aomega[k] ; global_idx < starts_Aomega[k+1]; ++global_idx){ Aki = V_Aomega[global_idx]; i = R_Aomega[global_idx]; if (Aki!=0){ //printf("Sparsifing (%d,%d,%lf)\n",k,i,Aki); SparsifyAki(k,i,Aki,R_A0 , starts_A0, V_A0, R_RP0, starts_RP0, V_RP0, C_R0P, starts_R0P, V_R0P, C_A0_cols, starts_A0_cols, V_A0_cols, intI1I2, auxI1, auxI2, auxm1, auxm2, thetta, globalDiagonal,naux_list); } } } free(intI1I2); free(auxI1); free(auxI2); free(auxm1); free(auxm2); free(thetta); } void SparsifyAki(mwIndex k, mwIndex i, double Aki, mwIndex* R_A0 , mwIndex *starts_A0 , double *V_A0, mwIndex *R_RP0 , mwIndex *starts_RP0 , double *V_RP0, mwIndex *C_R0P , mwIndex *starts_R0P , double *V_R0P, mwIndex *C_A0_cols, mwIndex *starts_A0_cols, double *V_A0_cols, mwIndex *intI1I2 , mwIndex *auxI1 , mwIndex *auxI2, mwIndex *auxm1 , mwIndex *auxm2 , double* thetta, mwIndex* globalDiagonal ,mwIndex naux_list){ mwIndex m1,m2, it, n_intersect, n_intersect2, m1it, list_it, global_km2,global_m1i,global_m1m1,global_m2m2,global_m2m1,kk; mwIndex *tmp_ind=0; double *tmp_double=0; double delta,R0Pm1i; // printf("k:%ld, i:%ld, Aki: %lf\n",k,i,Aki); // Now we check distance 2 paths between i and k n_intersect = intersect(C_R0P , R_RP0, starts_R0P[i] , starts_R0P[i+1], starts_RP0[k], starts_RP0[k+1],intI1I2, auxI1, auxI2); //printf("n_intersect: %d\n",n_intersect); if (n_intersect > 0){ delta = 0; for (it = 0 ; it < n_intersect ; it++){ thetta[it] = fabs(V_R0P[auxI1[it]]*V_RP0[auxI2[it]]); //printf("mex: V_R0P:%lf; V_RP0: %lf\n",V_R0P[auxI1[it]],V_RP0[auxI2[it]]); delta += thetta[it]; } delta = Aki/delta; for (it = 0 ; it < n_intersect ; it++){ thetta[it] *= delta; } for (it = 0 ; it < n_intersect ; it++){ m1 = intI1I2[it]; delta = thetta[it]; // printf("mex: %ld->%ld->%ld:%lf\n",i,m1,k,delta); // Here: m1==m2 // here we assume that A0 contains R0P and P0R global_km2 = findBinarySearch(R_A0, starts_A0[k], starts_A0[k+1], m1); global_m1i = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], i); global_m1m1 = globalDiagonal[m1]; // global_m1m1 = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], m1); #pragma omp atomic V_A0[global_km2] += delta; #pragma omp atomic V_A0[global_m1i] += delta; #pragma omp atomic V_A0[global_m1m1] -= delta; } }else{ // Now there is no distance 2 path. We seek distance 3 paths. 
list_it = 0; delta = 0; for (m1it = starts_R0P[i] ; m1it < starts_R0P[i+1] ; m1it++){ m1 = C_R0P[m1it]; R0Pm1i = V_R0P[m1it]; n_intersect2 = intersect(C_A0_cols , R_RP0, starts_A0_cols[m1] , starts_A0_cols[m1+1], starts_RP0[k], starts_RP0[k+1],intI1I2, auxI1, auxI2); for (it = 0 ; it < n_intersect2 ; it++){ auxm1[list_it] = m1; auxm2[list_it] = intI1I2[it]; thetta[list_it] = fabs(R0Pm1i*V_A0_cols[auxI1[it]]*V_RP0[auxI2[it]]); delta+=thetta[list_it]; ++list_it; if (list_it == naux_list){ printf("We're out of place in auxiliary arrays, but don't worry - we double naux_list\n"); tmp_ind = (mwIndex*)malloc(2*naux_list*sizeof(mwIndex)); for (kk=0 ; kk<naux_list ; ++kk){ tmp_ind[kk] = auxm1[kk]; } free(auxm1); auxm1 = tmp_ind; tmp_ind = (mwIndex*)malloc(2*naux_list*sizeof(mwIndex)); for (kk=0 ; kk<naux_list ; ++kk){ tmp_ind[kk] = auxm2[kk]; } free(auxm2); auxm2 = tmp_ind; tmp_double = (double*)malloc(2*naux_list*sizeof(double)); for (kk=0 ; kk<naux_list ; ++kk){ tmp_double[kk] = thetta[kk]; } free(thetta); thetta = tmp_double; return; } } } delta = Aki/delta; for (it = 0 ; it < list_it ; it++){ thetta[it] *= delta; } for (it = 0 ; it < list_it ; it++){ m1 = auxm1[it]; m2 = auxm2[it]; delta = thetta[it]; // printf("mex: %ld->%ld->%ld->%ld:%lf\n",i,m1,m2,k,delta); global_km2 = findBinarySearch(R_A0, starts_A0[k], starts_A0[k+1], m2); global_m1i = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], i); // global_m1m1 = findBinarySearch(R_A0, starts_A0[m1], starts_A0[m1+1], m1); // global_m2m2 = findBinarySearch(R_A0, starts_A0[m2], starts_A0[m2+1], m2); global_m2m2 = globalDiagonal[m2]; global_m1m1 = globalDiagonal[m1]; global_m2m1 = findBinarySearch(R_A0, starts_A0[m2], starts_A0[m2+1], m1); #pragma omp atomic V_A0[global_m2m2] -= delta; #pragma omp atomic V_A0[global_m2m1] += delta; #pragma omp atomic V_A0[global_km2] += delta; #pragma omp atomic V_A0[global_m1i] += delta; #pragma omp atomic V_A0[global_m1m1] -= delta; } } } void GenerateDiagonalGlobalIndices(mwIndex startRow, mwIndex endRow, mwIndex* R_A0, mwIndex* starts_A0, mwIndex* globalDiagonal){ mwIndex k; for (k = startRow ; k < endRow ;++k){ globalDiagonal[k] = findBinarySearch(R_A0, starts_A0[k], starts_A0[k+1], k); } } void splitMatrices(mwIndex startRow, mwIndex endRow, mwIndex* R_A0 , mwIndex *starts_A0, double *V_A0 , mwIndex *R_Aomega , mwIndex *starts_Aomega, double *V_Aomega){ mwIndex k,global_idx0, global_idx,tmp0, tmpOmega; // SPLITTING for (k = startRow ; k < endRow ;++k){ for (global_idx0 = starts_A0[k] ; global_idx0 < starts_A0[k+1] ; ++global_idx0){ V_A0[global_idx0] = 0; } global_idx0 = starts_A0[k]; global_idx = starts_Aomega[k]; while (global_idx < starts_Aomega[k+1] && global_idx0 < starts_A0[k+1]){ tmp0 = R_A0[global_idx0]; tmpOmega = R_Aomega[global_idx]; if (tmp0 == tmpOmega){ V_A0[global_idx0] = V_Aomega[global_idx]; V_Aomega[global_idx] = 0; ++global_idx0; ++global_idx; }else{ global_idx0 += (tmp0 < tmpOmega); global_idx += (tmp0 > tmpOmega); } } } } int intersect(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, mwIndex startB, mwIndex endB, mwIndex* AB, mwIndex* iA, mwIndex* iB) { mwIndex ia = startA, ib = startB; mwIndex k = 0; mwIndex tmpA,tmpB; if (A[startA] > B[endB-1] || A[endA-1] < B[startB]){ return k; } while ((ia < endA) && (ib < endB)){ tmpA = A[ia]; tmpB = B[ib]; if (tmpA == tmpB){ AB[k] = tmpA; iA[k] = ia++; iB[k] = ib++; ++k; }else{ ia += tmpA < tmpB; ib += tmpA > tmpB; } } return k; } mwIndex findBinarySearch(mwIndex* A, mwIndex startA, mwIndex endA, mwIndex toFind){ mwIndex i; for (i = 
startA ; i < endA ; ++i){ if (A[i]==toFind){ return i; } } printf("INDEX NOT FOUND!!! IMPOSSIBLE - THERE'S A BUG"); return 0; // mwIndex mid; // endA = endA-1; // // continually narrow search until just one element remains // while (startA < endA) // { // mid = (startA+endA)>>1; // // printf("mid = %ld\n",mid); // // reduce the search // if (A[mid] < toFind){ // startA = mid+1; // // printf("start = %ld\n",startA); // }else{ // endA = mid; // // printf("end = %ld\n",endA); // } // } // if ((endA == startA) && (A[startA] == toFind)) // return startA; // else{ // printf("INDEX NOT FOUND!!! IMPOSSIBLE - THERE'S A BUG"); // return 0; // } } // mwIndex setDiff(mwIndex* A, mwIndex* B, mwIndex startA, mwIndex endA, // mwIndex startB, mwIndex endB, mwIndex* AB, mwIndex* IA){ // mwIndex ia = startA, ib = startB; // mwIndex k = 0; // mwIndex tmpA,tmpB; // while ((ia < endA) && (ib < endB)){ // tmpA = A[ia]; // tmpB = B[ib]; // if (tmpA < tmpB){ // IA[k] = ++ia; // AB[k] = tmpA; // ++k; // }else{ // ia += tmpA == tmpB; // ib += tmpA >= tmpB; // } // } // while (ia < endA){ // AB[k] = A[ia]; // IA[k] = ++ia; // ++k; // } // } //
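
// A standard lower-bound binary search that could replace the linear scan
// in findBinarySearch(), assuming the row indices within each column are
// sorted ascending (the usual MATLAB sparse CSC invariant). It returns
// endA when the index is absent, so the caller decides how to fail;
// find_sorted is an illustrative name.
#include <stdio.h>
#include <stddef.h>

static size_t find_sorted(const size_t* A, size_t startA, size_t endA,
                          size_t toFind){
    size_t lo = startA, hi = endA, mid;
    while (lo < hi){
        mid = lo + (hi - lo)/2;        // avoids overflow of lo + hi
        if (A[mid] < toFind){
            lo = mid + 1;
        }else{
            hi = mid;
        }
    }
    return (lo < endA && A[lo] == toFind) ? lo : endA;  // endA == not found
}

int main(void){
    size_t rows[6] = {1, 3, 4, 8, 9, 15};
    printf("index of 8: %zu\n", find_sorted(rows, 0, 6, 8));        // 3
    printf("missing 7 -> %zu (== endA)\n", find_sorted(rows, 0, 6, 7));
    return 0;
}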
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return omp_get_max_threads(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } /* \brief Compute flattened index given coordinates and shape. */ template<int ndim> MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { int ret = 0; #pragma unroll for (int i = ndim-1, j = idx; i >=0; --i) { int tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! 
\brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};

template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   *        operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      const int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
};

#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Wrap Kernel<OP, xpu>::Launch* with some special-case helpers
 */
template<typename OP, typename xpu>
struct KernelWrapper {
  /*!
   * \brief Launch 'mshadow_op-type' op (i.e. DType (*)( ... ) { return <operation> }
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream object pointer (unused)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename DType, typename ...Args>
  MSHADOW_CINLINE static void LaunchMShadowOpEx(mshadow::Stream<xpu> *s,
                                                const int N, DType *dest, Args... args) {
    mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<
      typename OP::Operation, DType>(s, N, dest, args...);
  }

  /*!
   * \brief Launch 'mxnet_op-type' op (i.e. void (*)(int N, DType *out, ... )
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream object pointer (unused)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename DType, typename ...Args>
  MSHADOW_CINLINE static void LaunchMXNetOpEx(mshadow::Stream<xpu> *s,
                                              const int N, DType *dest, Args... args) {
    mxnet::op::mxnet_op::Kernel<OP, xpu>::template LaunchTuned<OP, DType>(s, N, dest, args...);
  }
};

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }

  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
_MXNET_TUNABLE_MXNET_OP_FWD(set_zero);  // _ prefix denotes "already in mxnet_op namespace"
_MXNET_TUNABLE_MXNET_OP_FWD(set_one);
}  // namespace mxnet_op

/*!
 * \brief Tuning specializations for the simple ops in <mshadow/base.h>
 *        Basically, this overrides mxnet::op::mxnet_op::Kernel<OP, cpu>::Launch() and
 *        redirects to mxnet::op::mxnet_op::KernelWrapper<OP, cpu>::Launch????OpEx(),
 *        which eventually leads back to mxnet::op::mxnet_op::Kernel<OP, cpu>::LaunchTuned()
 */
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::identity)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::plus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::minus)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::mul)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::div)
MXNET_TUNABLE_MSHADOW_OP_FWD_AND_BWD(mshadow::op::right)
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
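Kernel<OP, cpu>::Launch above picks between a plain serial loop and an OpenMP parallel-for based on the engine's recommended thread count, so small workloads skip the cost of spinning up a thread team. A minimal standalone C sketch of that dispatch policy; recommended_omp_threads, map_square and launch_map_square are illustrative stand-ins, not part of the MXNet API:

#include <omp.h>

static int recommended_omp_threads(void) {
  /* stand-in for engine::OpenMP::Get()->GetRecommendedOMPThreadCount() */
  return omp_get_max_threads();
}

static void map_square(int i, float *out, const float *in) {
  out[i] = in[i] * in[i];  /* the per-element OP::Map body */
}

static void launch_map_square(int N, float *out, const float *in) {
  const int omp_threads = recommended_omp_threads();
  if (omp_threads < 2) {
    /* below the parallelization threshold: run serially */
    for (int i = 0; i < N; ++i)
      map_square(i, out, in);
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < N; ++i)
      map_square(i, out, in);
  }
}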
alignment.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* Original code from the Application Kernel Matrix by Cray */ /* that was based on the ClustalW application */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <libgen.h> #include "sequence.h" #include "param.h" #include "alignment.h" #include "bots.h" int ktup, window, signif; int prot_ktup, prot_window, prot_signif; int gap_pos1, gap_pos2, mat_avscore; int nseqs, max_aa; #define MAX_ALN_LENGTH 5000 int *seqlen_array, def_aa_xref[NUMRES+1]; int *bench_output, *seq_output; double gap_open, gap_extend; double prot_gap_open, prot_gap_extend; double pw_go_penalty, pw_ge_penalty; double prot_pw_go_penalty, prot_pw_ge_penalty; char **args, **names, **seq_array; int matrix[NUMRES][NUMRES]; double gap_open_scale; double gap_extend_scale; // dnaFlag default value is false int dnaFlag = FALSE; // clustalw default value is false int clustalw = FALSE; #define INT_SCALE 100 #define MIN(a,b) ((a)<(b)?(a):(b)) #define tbgap(k) ((k) <= 0 ? 0 : tb + gh * (k)) #define tegap(k) ((k) <= 0 ? 
0 : te + gh * (k)) /*********************************************************************** * : **********************************************************************/ void del(int k, int *print_ptr, int *last_print, int *displ) { if (*last_print<0) *last_print = displ[(*print_ptr)-1] -= k; else *last_print = displ[(*print_ptr)++] = -k; } /*********************************************************************** * : **********************************************************************/ void add(int v, int *print_ptr, int *last_print, int *displ) { if (*last_print < 0) { displ[(*print_ptr)-1] = v; displ[(*print_ptr)++] = *last_print; } else { *last_print = displ[(*print_ptr)++] = v; } } /*********************************************************************** * : **********************************************************************/ int calc_score(int iat, int jat, int v1, int v2, int seq1, int seq2) { int i, j, ipos, jpos; ipos = v1 + iat; jpos = v2 + jat; i = seq_array[seq1][ipos]; j = seq_array[seq2][jpos]; return (matrix[i][j]); } /*********************************************************************** * : **********************************************************************/ int get_matrix(int *matptr, int *xref, int scale) { int gg_score = 0; int gr_score = 0; int i, j, k, ti, tj, ix; int av1, av2, av3, min, max, maxres; for (i = 0; i <= max_aa; i++) for (j = 0; j <= max_aa; j++) matrix[i][j] = 0; ix = 0; maxres = 0; for (i = 0; i <= max_aa; i++) { ti = xref[i]; for (j = 0; j <= i; j++) { tj = xref[j]; if ((ti != -1) && (tj != -1)) { k = matptr[ix]; if (ti == tj) { matrix[ti][ti] = k * scale; maxres++; } else { matrix[ti][tj] = k * scale; matrix[tj][ti] = k * scale; } ix++; } } } maxres--; av1 = av2 = av3 = 0; for (i = 0; i <= max_aa; i++) { for (j = 0; j <= i; j++) { av1 += matrix[i][j]; if (i == j) av2 += matrix[i][j]; else av3 += matrix[i][j]; } } av1 /= (maxres*maxres)/2; av2 /= maxres; av3 /= (int) (((double)(maxres*maxres-maxres))/2); mat_avscore = -av3; min = max = matrix[0][0]; for (i = 0; i <= max_aa; i++) for (j = 1; j <= i; j++) { if (matrix[i][j] < min) min = matrix[i][j]; if (matrix[i][j] > max) max = matrix[i][j]; } for (i = 0; i < gap_pos1; i++) { matrix[i][gap_pos1] = gr_score; matrix[gap_pos1][i] = gr_score; matrix[i][gap_pos2] = gr_score; matrix[gap_pos2][i] = gr_score; } matrix[gap_pos1][gap_pos1] = gg_score; matrix[gap_pos2][gap_pos2] = gg_score; matrix[gap_pos2][gap_pos1] = gg_score; matrix[gap_pos1][gap_pos2] = gg_score; maxres += 2; return(maxres); } /*********************************************************************** * : **********************************************************************/ void forward_pass(char *ia, char *ib, int n, int m, int *se1, int *se2, int *maxscore, int g, int gh) { int i, j, f, p, t, hh; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; *maxscore = 0; *se1 = *se2 = 0; for (i = 0; i <= m; i++) {HH[i] = 0; DD[i] = -g;} for (i = 1; i <= n; i++) { hh = p = 0; f = -g; for (j = 1; j <= m; j++) { f -= gh; t = hh - g - gh; if (f < t) f = t; DD[j] -= gh; t = HH[j] - g - gh; if (DD[j] < t) DD[j] = t; hh = p + matrix[(int)ia[i]][(int)ib[j]]; if (hh < f) hh = f; if (hh < DD[j]) hh = DD[j]; if (hh < 0) hh = 0; p = HH[j]; HH[j] = hh; if (hh > *maxscore) {*maxscore = hh; *se1 = i; *se2 = j;} } } } /*********************************************************************** * : **********************************************************************/ void reverse_pass(char *ia, char *ib, int se1, int se2, int *sb1, int *sb2, int maxscore, int g, int 
gh) { int i, j, f, p, t, hh, cost; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; cost = 0; *sb1 = *sb2 = 1; for (i = se2; i > 0; i--){ HH[i] = -1; DD[i] = -1;} for (i = se1; i > 0; i--) { hh = f = -1; if (i == se1) p = 0; else p = -1; for (j = se2; j > 0; j--) { f -= gh; t = hh - g - gh; if (f < t) f = t; DD[j] -= gh; t = HH[j] - g - gh; if (DD[j] < t) DD[j] = t; hh = p + matrix[(int)ia[i]][(int)ib[j]]; if (hh < f) hh = f; if (hh < DD[j]) hh = DD[j]; p = HH[j]; HH[j] = hh; if (hh > cost) { cost = hh; *sb1 = i; *sb2 = j; if (cost >= maxscore) break; } } if (cost >= maxscore) break; } } /*********************************************************************** * : **********************************************************************/ int diff (int A, int B, int M, int N, int tb, int te, int *print_ptr, int *last_print, int *displ, int seq1, int seq2, int g, int gh) { int i, j, f, e, s, t, hh; int midi, midj, midh, type; int HH[MAX_ALN_LENGTH]; int DD[MAX_ALN_LENGTH]; int RR[MAX_ALN_LENGTH]; int SS[MAX_ALN_LENGTH]; if (N <= 0) {if (M > 0) del(M, print_ptr, last_print, displ); return( - (int) tbgap(M)); } if (M <= 1) { if (M <= 0) {add(N, print_ptr, last_print, displ); return( - (int)tbgap(N));} midh = -(tb+gh) - tegap(N); hh = -(te+gh) - tbgap(N); if (hh > midh) midh = hh; midj = 0; for (j = 1; j <= N; j++) { hh = calc_score(1,j,A,B,seq1,seq2) - tegap(N-j) - tbgap(j-1); if (hh > midh) {midh = hh; midj = j;} } if (midj == 0) { del(1, print_ptr, last_print, displ); add(N, print_ptr, last_print, displ); } else { if (midj > 1) add(midj-1, print_ptr, last_print, displ); displ[(*print_ptr)++] = *last_print = 0; if (midj < N) add(N-midj, print_ptr, last_print, displ); } return midh; } midi = M / 2; HH[0] = 0.0; t = -tb; for (j = 1; j <= N; j++) { HH[j] = t = t - gh; DD[j] = t - g; } t = -tb; for (i = 1; i <= midi; i++) { s = HH[0]; HH[0] = hh = t = t - gh; f = t - g; for (j = 1; j <= N; j++) { if ((hh = hh - g - gh) > (f = f - gh)) f = hh; if ((hh = HH[j] - g - gh) > (e = DD[j]- gh)) e = hh; hh = s + calc_score(i,j,A,B,seq1,seq2); if (f > hh) hh = f; if (e > hh) hh = e; s = HH[j]; HH[j] = hh; DD[j] = e; } } DD[0] = HH[0]; RR[N] = 0; t = -te; for (j = N-1; j >= 0; j--) {RR[j] = t = t - gh; SS[j] = t - g;} t = -te; for (i = M - 1; i >= midi; i--) { s = RR[N]; RR[N] = hh = t = t-gh; f = t - g; for (j = N - 1; j >= 0; j--) { if ((hh = hh - g - gh) > (f = f - gh)) f = hh; if ((hh = RR[j] - g - gh) > (e = SS[j] - gh)) e = hh; hh = s + calc_score(i+1,j+1,A,B,seq1,seq2); if (f > hh) hh = f; if (e > hh) hh = e; s = RR[j]; RR[j] = hh; SS[j] = e; } } SS[N] = RR[N]; midh = HH[0] + RR[0]; midj = 0; type = 1; for (j = 0; j <= N; j++) { hh = HH[j] + RR[j]; if (hh >= midh) if (hh > midh || (HH[j] != DD[j] && RR[j] == SS[j])) {midh = hh; midj = j;} } for (j = N; j >= 0; j--) { hh = DD[j] + SS[j] + g; if (hh > midh) {midh = hh;midj = j;type = 2;} } if (type == 1) { diff(A, B, midi, midj, tb, g, print_ptr, last_print, displ, seq1, seq2, g, gh); diff(A+midi, B+midj, M-midi, N-midj, g, te, print_ptr, last_print, displ, seq1, seq2, g, gh); } else { diff(A, B, midi-1, midj, tb, 0.0, print_ptr, last_print, displ, seq1, seq2, g, gh); del(2, print_ptr, last_print, displ); diff(A+midi+1, B+midj, M-midi-1, N-midj, 0.0, te, print_ptr, last_print, displ, seq1, seq2, g, gh); } return midh; } /*********************************************************************** * : **********************************************************************/ double tracepath(int tsb1, int tsb2, int *print_ptr, int *displ, int seq1, int seq2) { int 
i, k; int i1 = tsb1; int i2 = tsb2; int pos = 0; int count = 0; for (i = 1; i <= *print_ptr - 1; ++i) { if (displ[i]==0) { char c1 = seq_array[seq1][i1]; char c2 = seq_array[seq2][i2]; if ((c1!=gap_pos1) && (c1 != gap_pos2) && (c1 == c2)) count++; ++i1; ++i2; ++pos; } else if ((k = displ[i]) > 0) { i2 += k; pos += k; } else { i1 -= k; pos -= k; } } return (100.0 * (double) count); } int pairalign() { int i, n, m, si, sj; int len1, len2, maxres; double gg, mm_score; int *mat_xref, *matptr; matptr = gon250mt; mat_xref = def_aa_xref; maxres = get_matrix(matptr, mat_xref, 10); if (maxres == 0) return(-1); bots_message("Start aligning "); #pragma omp parallel { #pragma omp for schedule(static) private(i,n,si,sj,len1,m) for (si = 0; si < nseqs; si++) { n = seqlen_array[si+1]; for (i = 1, len1 = 0; i <= n; i++) { char c = seq_array[si+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len1++; } for (sj = si + 1; sj < nseqs; sj++) { m = seqlen_array[sj+1]; if ( n == 0 || m == 0 ) { bench_output[si*nseqs+sj] = (int) 1.0; } else { #pragma omp task \ private(i,gg,len2,mm_score) firstprivate(m,n,si,sj,len1) \ shared(nseqs, bench_output,seqlen_array,seq_array,gap_pos1,gap_pos2,pw_ge_penalty,pw_go_penalty,mat_avscore) { int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh; int displ[2*MAX_ALN_LENGTH+1]; int print_ptr, last_print; for (i = 1, len2 = 0; i <= m; i++) { char c = seq_array[sj+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len2++; } if ( dnaFlag == TRUE ) { g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend } else { gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value g = (int) ((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend } seq1 = si + 1; seq2 = sj + 1; forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh); reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh); print_ptr = 1; last_print = 0; diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh); mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2); if (len1 == 0 || len2 == 0) mm_score = 0.0; else mm_score /= (double) MIN(len1,len2); bench_output[si*nseqs+sj] = (int) mm_score; } // end task } // end if (n == 0 || m == 0) } // for (j) } // end parallel for (i) } // end parallel bots_message(" completed!\n"); return 0; } int pairalign_seq() { int i, n, m, si, sj; int len1, len2, maxres; double gg, mm_score; int *mat_xref, *matptr; matptr = gon250mt; mat_xref = def_aa_xref; maxres = get_matrix(matptr, mat_xref, 10); if (maxres == 0) return(-1); for (si = 0; si < nseqs; si++) { n = seqlen_array[si+1]; for (i = 1, len1 = 0; i <= n; i++) { char c = seq_array[si+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len1++; } for (sj = si + 1; sj < nseqs; sj++) { m = seqlen_array[sj+1]; if ( n == 0 || m == 0) { seq_output[si*nseqs+sj] = (int) 1.0; } else { int se1, se2, sb1, sb2, maxscore, seq1, seq2, g, gh; int displ[2*MAX_ALN_LENGTH+1]; int print_ptr, last_print; for (i = 1, len2 = 0; i <= m; i++) { char c = seq_array[sj+1][i]; if ((c != gap_pos1) && (c != gap_pos2)) len2++; } if ( dnaFlag == TRUE ) { g = (int) ( 2 * INT_SCALE * pw_go_penalty * gap_open_scale ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty * gap_extend_scale); //gapExtend } else { gg = pw_go_penalty + log((double) MIN(n, m)); // temporary value g = (int) 
((mat_avscore <= 0) ? (2 * INT_SCALE * gg) : (2 * mat_avscore * gg * gap_open_scale) ); // gapOpen gh = (int) (INT_SCALE * pw_ge_penalty); //gapExtend } seq1 = si + 1; seq2 = sj + 1; forward_pass(&seq_array[seq1][0], &seq_array[seq2][0], n, m, &se1, &se2, &maxscore, g, gh); reverse_pass(&seq_array[seq1][0], &seq_array[seq2][0], se1, se2, &sb1, &sb2, maxscore, g, gh); print_ptr = 1; last_print = 0; diff(sb1-1, sb2-1, se1-sb1+1, se2-sb2+1, 0, 0, &print_ptr, &last_print, displ, seq1, seq2, g, gh); mm_score = tracepath(sb1, sb2, &print_ptr, displ, seq1, seq2); if (len1 == 0 || len2 == 0) mm_score = 0.0; else mm_score /= (double) MIN(len1,len2); seq_output[si*nseqs+sj] = (int) mm_score; } } } return 0; } /*********************************************************************** * : **********************************************************************/ void init_matrix(void) { int i, j; char c1, c2; gap_pos1 = NUMRES - 2; gap_pos2 = NUMRES - 1; max_aa = strlen(amino_acid_codes) - 2; for (i = 0; i < NUMRES; i++) def_aa_xref[i] = -1; for (i = 0; (c1 = amino_acid_order[i]); i++) for (j = 0; (c2 = amino_acid_codes[j]); j++) if (c1 == c2) {def_aa_xref[i] = j; break;} } void pairalign_init (char *filename) { int i; if (!filename || !filename[0]) { bots_error(0, "Please specify an input file with the -f option\n"); } init_matrix(); nseqs = readseqs(filename); bots_message("Multiple Pairwise Alignment (%d sequences)\n", nseqs); for (i = 1; i <= nseqs; i++) bots_debug("Sequence %d: %s %6.d aa\n", i, names[i], seqlen_array[i]); if ( clustalw == TRUE ) { gap_open_scale = 0.6667; gap_extend_scale = 0.751; } else { gap_open_scale = 1.0; gap_extend_scale = 1.0; } if ( dnaFlag == TRUE ) { // Using DNA parameters ktup = 2; window = 4; signif = 4; gap_open = 15.00; gap_extend = 6.66; pw_go_penalty = 15.00; pw_ge_penalty = 6.66; } else { // Using protein parameters ktup = 1; window = 5; signif = 5; gap_open = 10.0; gap_extend = 0.2; pw_go_penalty = 10.0; pw_ge_penalty = 0.1; } } void align_init () { int i,j; bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs); for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) bench_output[i*nseqs+j] = 0; } void align() { pairalign(); } void align_seq_init () { int i,j; seq_output = (int *) malloc(sizeof(int)*nseqs*nseqs); bench_output = (int *) malloc(sizeof(int)*nseqs*nseqs); for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) seq_output[i*nseqs+j] = 0; } void align_seq() { pairalign_seq(); } void align_end () { int i,j; for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) if (bench_output[i*nseqs+j] != 0) bots_debug("Benchmark sequences (%d:%d) Aligned. Score: %d\n", i+1 , j+1 , (int) bench_output[i*nseqs+j]); } int align_verify () { int i,j; int result = BOTS_RESULT_SUCCESSFUL; for(i = 0; i<nseqs; i++) for(j = 0; j<nseqs; j++) if (bench_output[i*nseqs+j] != seq_output[i*nseqs+j]) { bots_message("Error: Optimized prot. (%3d:%3d)=%5d Sequential prot. (%3d:%3d)=%5d\n", i+1, j+1, (int) bench_output[i*nseqs+j], i+1, j+1, (int) seq_output[i*nseqs+j]); result = BOTS_RESULT_UNSUCCESSFUL; } return result; }
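forward_pass and reverse_pass above implement the Gotoh affine-gap recurrence: a gap of length k costs g + gh*k, tracked with a horizontal-gap state (f), a vertical-gap state (DD[j]) and a match state (HH[j]), clamped at zero for local alignment. A compact standalone restatement of the forward recurrence, assuming a caller-supplied substitution score sub(i,j) in place of the globals above, and reusing MAX_ALN_LENGTH from this file (so m must stay below it):

static int local_affine_score(int n, int m, int g, int gh,
                              int (*sub)(int, int))
{
    int HH[MAX_ALN_LENGTH];   /* best score of an alignment ending at (i,j)  */
    int DD[MAX_ALN_LENGTH];   /* best score ending in a vertical gap at (i,j) */
    int best = 0;
    for (int j = 0; j <= m; j++) { HH[j] = 0; DD[j] = -g; }
    for (int i = 1; i <= n; i++) {
        int hh = 0, p = 0, f = -g;
        for (int j = 1; j <= m; j++) {
            f -= gh;                                   /* extend horizontal gap */
            if (hh - g - gh > f) f = hh - g - gh;      /* or open a new one     */
            DD[j] -= gh;                               /* extend vertical gap   */
            if (HH[j] - g - gh > DD[j]) DD[j] = HH[j] - g - gh;
            hh = p + sub(i, j);                        /* diagonal (match) move */
            if (hh < f) hh = f;
            if (hh < DD[j]) hh = DD[j];
            if (hh < 0) hh = 0;                        /* local alignment clamp */
            p = HH[j];                                 /* save H[i-1][j] for next j */
            HH[j] = hh;
            if (hh > best) best = hh;
        }
    }
    return best;
}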
sb_sphkmeans.c
/*! \file \brief A parallel spherical k-means program \date Started 4/20/2013 \author George */ #define _SVID_SOURCE #define _DEFAULT_SOURCE #if 0 int main() {} #else #include <GKlib.h> #include <bdmpi.h> #include <sys/types.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/mman.h> #include <unistd.h> #include <malloc.h> #include <pthread.h> /**************************************************************************/ /* data structures */ /**************************************************************************/ typedef struct { int npes, mype, nthreads; BDMPI_Comm comm; int nclusters; int ntrials, niters; char *filename; int mlock; /* the total number of rows and their overall distribution */ int nrows, ncols; int *rowdist; /* timers */ double totalTmr; double compTmr; double commTmr; } params_t; typedef struct { int val; int loc; } vlp_ii_t; /**************************************************************************/ /* prototypes */ /**************************************************************************/ gk_csr_t *LoadData(params_t *params); void WriteClustering(params_t *params, gk_csr_t *mat, int *cvec); void PreprocessData(params_t *params, gk_csr_t *mat); int *ClusterData(params_t *params, gk_csr_t *mat); void ComputeClusteringStatistics(params_t *params, gk_csr_t *mat, int *cpart); void printInCoreInfo(char *msg, int mype, gk_csr_t *mat); /**************************************************************************/ /**************************************************************************/ int main(int argc, char **argv) { params_t *params; gk_csr_t *mat; int *cvec; BDMPI_Status status; double max, current; setbuf(stdout, NULL); setbuf(stderr, NULL); BDMPI_Init(&argc, &argv); params = (params_t *)gk_malloc(sizeof(params_t), "params"); memset(params, 0, sizeof(params_t)); params->comm = BDMPI_COMM_WORLD; BDMPI_Comm_size(params->comm, &(params->npes)); BDMPI_Comm_rank(params->comm, &(params->mype)); if (argc != 7) { if (params->mype == 0) fprintf(stderr, "Usage: %s filename #clusters #trials #iters #threads mlock[0;1]\n", argv[0]); goto DONE; } params->filename = strdup(argv[1]); params->nclusters = atoi(argv[2]); params->ntrials = atoi(argv[3]); params->niters = atoi(argv[4]); params->nthreads = atoi(argv[5]); params->mlock = atoi(argv[6]); printf("[%3d] nclusters: %d, ntrials: %d, niters: %d, nthreads: %d, mlock: %d\n", params->mype, params->nclusters, params->ntrials, params->niters, params->nthreads, params->mlock); omp_set_num_threads(params->nthreads); gk_clearwctimer(params->totalTmr); gk_clearwctimer(params->compTmr); gk_clearwctimer(params->commTmr); BDMPI_Barrier(params->comm); BDMPI_Barrier(params->comm); gk_startwctimer(params->totalTmr); mat = LoadData(params); PreprocessData(params, mat); srand(params->mype+101); printf("[%03d] timestamp01: %zu\n", params->mype, (size_t)time(NULL)); cvec = ClusterData(params, mat); printf("[%03d] timestamp02: %zu\n", params->mype, (size_t)time(NULL)); //WriteClustering(params, mat, cvec); //sbsaveall(); BDMPI_Barrier(params->comm); BDMPI_Barrier(params->comm); gk_stopwctimer(params->totalTmr); /* print timing stats */ current = gk_getwctimer(params->compTmr); BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm); if (params->mype == 0) printf(" compTmr: %10.4lf\n", max); current = gk_getwctimer(params->commTmr); BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm); if (params->mype == 0) printf(" commTmr: %10.4lf\n", max); current = gk_getwctimer(params->totalTmr); 
BDMPI_Reduce(&current, &max, 1, BDMPI_DOUBLE, BDMPI_MAX, 0, params->comm); if (params->mype == 0) printf(" totalTmr: %10.4lf\n", max); DONE: BDMPI_Finalize(); return EXIT_SUCCESS; } /**************************************************************************/ /*! Reads a sparse matrix in binary CSR format. The same matrix is read by everybody in order to simulate a large file clustering. \returns the local portion of the matrix. */ /**************************************************************************/ gk_csr_t *LoadData(params_t *params) { int mype=params->mype, npes=params->npes; int lnrows, flag; size_t lnnz; gk_csr_t *mat=NULL; BDMPI_Status status; if (!gk_fexists(params->filename)) gk_errexit(SIGERR, "File %s does not exist!\n", params->filename); mat = gk_csr_Read(params->filename, GK_CSR_FMT_BINROW, 1, 0); lnrows = mat->nrows; lnnz = mat->rowptr[mat->nrows]; params->nrows = lnrows*npes; params->ncols = mat->ncols; printf("[%3d] nrows: %d, ncols: %d, tnnz: %zu, lnrows: %d, lnnz: %zu [ts: %d]\n", mype, params->nrows, params->ncols, npes*lnnz, lnrows, lnnz, (int)time(NULL)); return mat; } /**************************************************************************/ /*! Writes a clustering vector. It just let each process write its portion to the file in a round-robin fashion. */ /**************************************************************************/ void WriteClustering(params_t *params, gk_csr_t *mat, int *cvec) { int npes=params->npes, mype=params->mype, dummy=0; size_t i; BDMPI_Status status; FILE *fpout; char outfile[1024]; sprintf(outfile, "%s.part.%d", params->filename, params->nclusters); if (mype == 0) { fpout = gk_fopen(outfile, "w", "outfile"); for (i=0; i<mat->nrows; i++) fprintf(fpout, "%d\n", cvec[i]); gk_fclose(fpout); if (mype+1 < npes) BDMPI_Send(&dummy, 1, BDMPI_INT, mype+1, 1, params->comm); } else { BDMPI_Recv(&dummy, 1, BDMPI_INT, mype-1, 1, params->comm, &status); fpout = gk_fopen(outfile, "a", "outfile"); for (i=0; i<mat->nrows; i++) fprintf(fpout, "%d\n", cvec[i]); gk_fclose(fpout); if (mype+1 < npes) BDMPI_Send(&mype, 1, BDMPI_INT, mype+1, 1, params->comm); } } /**************************************************************************/ /*! This function performs various pre-processing steps on the matrix. */ /**************************************************************************/ void PreprocessData(params_t *params, gk_csr_t *mat) { gk_csr_Normalize(mat, GK_CSR_ROW, 2); } /**************************************************************************/ /*! This function computes the k-way clustering solution. 
*/ /**************************************************************************/ int *ClusterData(params_t *params, gk_csr_t *mat) { int npes=params->npes, mype=params->mype; size_t trial, iter, i, j, k, offset; int nrows, ncols, nclusters, rnum, *rowind, *cpart=NULL, *bcpart=NULL, *tptr; int lnmoves, gnmoves; ssize_t *rowptr; float *rowval, *centers=NULL; float dnorms[params->nclusters], crval, bcrval; vlp_ii_t lmaxloc, gmaxloc; nclusters = params->nclusters; nrows = mat->nrows; ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; bcpart = gk_imalloc(nrows, "bcpart"); /* perform a number of random trials */ for (bcrval=0.0, trial=0; trial<params->ntrials; trial++) { /* select the initial cluster seeds */ lmaxloc.val = rand(); lmaxloc.loc = mype; BDMPI_Allreduce(&lmaxloc, &gmaxloc, 1, BDMPI_2INT, BDMPI_MAXLOC, params->comm); if (mype == gmaxloc.loc) { /* this pe will be selecting the initial centers */ centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers"); /* pick the centers */ rnum = RandomInRange(nrows/nclusters); for (k=0; k<nclusters; k++) { i = ((k+1)*rnum)%nrows; for (j=rowptr[i]; j<rowptr[i+1]; j++) centers[rowind[j]*nclusters+k] = rowval[j]; } } /* get into the iterative refinement */ cpart = gk_ismalloc(nrows, -1, "cpart"); for (iter=0; iter<params->niters; iter++) { if (mype == gmaxloc.loc) { printf("Working on trial: %zu, iter: %zu\n", trial, iter); } else { centers = gk_fmalloc(nclusters*ncols, "centers"); } BDMPI_Bcast(centers, ncols*nclusters, BDMPI_FLOAT, gmaxloc.loc, params->comm); printf("[%03d]%04zu.%04zu.0 ts: %d\n", mype, trial, iter, (int)time(NULL)); if (params->mlock) GKWARN(BDMPI_mlockall(MCL_CURRENT) == 0); printf("[%03d]%04zu.%04zu.1 ts: %d\n", mype, trial, iter, (int)time(NULL)); /* assign each local row to the closest cluster */ gk_startwctimer(params->compTmr); //BDMPI_sbload(rowptr); //BDMPI_sbload(rowind); //BDMPI_sbload(rowval); //BDMPI_sbload(centers); //BDMPI_sbload(cpart); lnmoves = 0; #pragma omp parallel default(none),\ shared(nrows, nclusters, rowptr, rowind, rowval, centers, cpart),\ private(i, j, k, offset),\ reduction(+:lnmoves) { float sims[nclusters]; #pragma omp for schedule(dynamic,32) for (i=0; i<nrows; i++) { for (k=0; k<nclusters; k++) sims[k] = 0.0; for (j=rowptr[i]; j<rowptr[i+1]; j++) { offset = rowind[j]*nclusters; for (k=0; k<nclusters; k++) sims[k] += rowval[j]*centers[offset+k]; } k = gk_fargmax(nclusters, sims, 1); if (k != cpart[i]) lnmoves++; cpart[i] = k; } } /* compute the new local centers */ gk_fset(nclusters*ncols, 0.0, centers); for (i=0; i<nrows; i++) { k = cpart[i]; for (j=rowptr[i]; j<rowptr[i+1]; j++) centers[rowind[j]*nclusters+k] += rowval[j]; } gk_stopwctimer(params->compTmr); if (params->mlock) GKWARN(BDMPI_munlockall() == 0); printf("[%03d]%04zu.%04zu.2 ts: %d\n", mype, trial, iter, (int)time(NULL)); /* compute the new global centers */ BDMPI_Reduce(centers, centers, nclusters*ncols, BDMPI_FLOAT, BDMPI_SUM, gmaxloc.loc, params->comm); if (mype == gmaxloc.loc) { if (params->mlock) GKWARN(BDMPI_mlock(centers, ncols*nclusters*sizeof(float)) == 0); for (k=0; k<nclusters; k++) dnorms[k] = 0.0; for (i=0; i<ncols; i++) { offset = i*nclusters; for (k=0; k<nclusters; k++) dnorms[k] += centers[offset+k]*centers[offset+k]; } for (crval=0.0, k=0; k<nclusters; k++) { if (dnorms[k] > 0) { crval += sqrt(dnorms[k]); dnorms[k] = 1.0/sqrt(dnorms[k]); } } for (i=0; i<ncols; i++) { offset = i*nclusters; for (k=0; k<nclusters; k++) centers[offset+k] *= dnorms[k]; } if (params->mlock) 
GKWARN(BDMPI_munlock(centers, ncols*nclusters*sizeof(float)) == 0); //printf("trial: %2zd; iter: %3zd; crval: %.8e; bcrval: %.8e\n", trial, iter, crval, bcrval); } else { gk_free((void **)&centers, LTERM); } /* see if you are done refining */ if (iter > 0) { BDMPI_Allreduce(&lnmoves, &gnmoves, 1, BDMPI_INT, BDMPI_SUM, params->comm); if (gnmoves == 0) break; } } BDMPI_Bcast(&crval, 1, BDMPI_FLOAT, gmaxloc.loc, params->comm); if (crval > bcrval) { gk_SWAP(cpart, bcpart, tptr); bcrval = crval; } gk_free((void **)&cpart, LTERM); if (mype == gmaxloc.loc) printf("[%3zu:%3zu] gnmoves: %8d; crval: %8.4e; bcrval: %8.4e [ts: %d]\n", trial, iter, gnmoves, crval, bcrval, (int)time(NULL)); } if (centers != NULL) gk_free((void **)&centers, LTERM); return bcpart; } /**************************************************************************/ /*! This function prints final statistics for the clustering solution. */ /**************************************************************************/ void ComputeClusteringStatistics(params_t *params, gk_csr_t *mat, int *cpart) { int npes=params->npes, mype=params->mype, nclusters=params->nclusters; size_t i, j, k, offset; int nrows, ncols, *rowind, *pwgts; ssize_t *rowptr; float *rowval, *centers, *dnorms; float crval, tcrval; nrows = mat->nrows; ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; centers = gk_fsmalloc(nclusters*ncols, 0.0, "centers"); pwgts = gk_ismalloc(nclusters, 0, "pwgts"); /* compute the local centers and local partition weights */ gk_fset(nclusters*ncols, 0.0, centers); for (i=0; i<nrows; i++) { k = cpart[i]; pwgts[k]++; for (j=rowptr[i]; j<rowptr[i+1]; j++) centers[rowind[j]*nclusters+k] += rowval[j]; } /* compute global centroids and partition weights */ BDMPI_Reduce(pwgts, pwgts, nclusters, BDMPI_INT, BDMPI_SUM, 0, params->comm); BDMPI_Reduce(centers, centers, nclusters*ncols, BDMPI_FLOAT, BDMPI_SUM, 0, params->comm); if (mype == 0) { dnorms = gk_fsmalloc(nclusters, 0.0, "dnorms"); for (i=0; i<ncols; i++) { offset = i*nclusters; for (k=0; k<nclusters; k++) dnorms[k] += centers[offset+k]*centers[offset+k]; } for (tcrval=0.0, k=0; k<nclusters; k++) { crval = (dnorms[k] > 0 ? sqrt(dnorms[k]) : 0.0); tcrval += crval; printf("Cluster: %4zu %6d %.4e\n", k, pwgts[k], crval); } printf("Overall: %.4e\n", tcrval); gk_free((void **)&dnorms, LTERM); } gk_free((void **)&centers, &pwgts, LTERM); } /**************************************************************************/ /*! 
Print residency information about the various elements of the matrix */ /**************************************************************************/ void printInCoreInfo(char *msg, int mype, gk_csr_t *mat) { size_t i, vlen, pagesize, addr, size, offset, counts1[2], counts2[2], counts3[2]; unsigned char *vec; struct rusage usage; char *ptr; pagesize = sysconf(_SC_PAGESIZE); vlen = mat->rowptr[mat->nrows]*sizeof(int)/pagesize + 10; vec = (unsigned char *)gk_malloc(sizeof(unsigned char *)*vlen, "vec"); counts1[0] = counts1[1] = 0; counts2[0] = counts2[1] = 0; counts3[0] = counts3[1] = 0; ptr = (char *)mat->rowptr; size = sizeof(ssize_t)*(mat->nrows+1); addr = (size_t)ptr; offset = addr%pagesize; ptr -= offset; size += offset; if (mincore(ptr, size, vec) == -1) printf("mincore error for rowptr: %s [%zu %zu %zu]\n", strerror(errno), pagesize, addr, addr%pagesize); else { vlen = (size+pagesize-1)/pagesize; for (i=0; i<vlen; i++) counts1[vec[i]&1]++; } ptr = (char *)mat->rowind; size = sizeof(int)*(mat->rowptr[mat->nrows]); addr = (size_t)ptr; offset = addr%pagesize; ptr -= offset; size += offset; if (mincore(ptr, size, vec) == -1) printf("mincore error for rowind: %s [%zu %zu %zu]\n", strerror(errno), pagesize, addr, addr%pagesize); else { vlen = (size+pagesize-1)/pagesize; for (i=0; i<vlen; i++) counts2[vec[i]&1]++; } ptr = (char *)mat->rowval; size = sizeof(int)*(mat->rowptr[mat->nrows]); addr = (size_t)ptr; offset = addr%pagesize; ptr -= offset; size += offset; if (mincore(ptr, size, vec) == -1) printf("mincore error for rowval: %s [%zu %zu %zu]\n", strerror(errno), pagesize, addr, addr%pagesize); else { vlen = (size+pagesize-1)/pagesize; for (i=0; i<vlen; i++) counts3[vec[i]&1]++; } gk_free((void **)&vec, LTERM); if (getrusage(RUSAGE_SELF, &usage) == -1) { printf("getrusage error: %s\n", strerror(errno)); } else { printf("[%03d]%s [%5zu %5zu] [%5zu %5zu] [%5zu %5zu] mrss: %ld minf: %ld majf: %ld\n", mype, msg, counts1[0], counts1[1], counts2[0], counts2[1], counts3[0], counts3[1], usage.ru_maxrss, usage.ru_minflt, usage.ru_majflt); } } #endif
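printInCoreInfo above measures how many pages of each CSR array are resident by rounding the pointer down to a page boundary before calling mincore(2), which requires a page-aligned start address. A minimal sketch of that idiom for an arbitrary buffer (resident_pages is a hypothetical helper, error handling trimmed):

#include <unistd.h>
#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

static size_t resident_pages(const void *buf, size_t nbytes)
{
  size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
  uintptr_t addr = (uintptr_t)buf;
  size_t offset = addr % pagesize;           /* distance past the page start */
  char *start = (char *)(addr - offset);     /* align down to page boundary  */
  size_t span = nbytes + offset;
  size_t npages = (span + pagesize - 1) / pagesize;
  unsigned char vec[npages];                 /* one status byte per page     */
  size_t in_core = 0;
  if (mincore(start, span, vec) == 0) {
    for (size_t i = 0; i < npages; i++)
      in_core += vec[i] & 1;                 /* low bit set => page resident */
  }
  return in_core;
}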
4597.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static void init_array (int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
    }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void print_array(int ni, int nj,
                        DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static void kernel_conv2d(int ni, int nj,
                          DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                          DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i) {
    /* 'distribute' alone does not accept a schedule clause; the combined
       'distribute parallel for' construct is required for schedule(). */
    #pragma omp target teams distribute parallel for schedule(static, 8)
    for (j = 1; j < _PB_NJ - 1; ++j) {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
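The kernel above relies on implicit mapping, so each per-row target region may re-transfer A and B. A common alternative is one enclosing target data region so the arrays cross the device boundary once; a sketch under that assumption (kernel_conv2d_mapped is hypothetical; DATA_TYPE, NI and NJ come from convolution-2d.h):

static void kernel_conv2d_mapped(int ni, int nj,
                                 DATA_TYPE A[NI][NJ], DATA_TYPE B[NI][NJ])
{
  /* map the input once; copy B back only when the data region ends */
  #pragma omp target data map(to: A[0:NI][0:NJ]) map(from: B[0:NI][0:NJ])
  {
    for (int i = 1; i < ni - 1; ++i) {
      #pragma omp target teams distribute parallel for schedule(static, 8)
      for (int j = 1; j < nj - 1; ++j) {
        B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                + -0.3 * A[i][j-1]  + 0.6 * A[i][j]   + -0.9 * A[i][j+1]
                + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
      }
    }
  }
}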
kmeans_cilk.c
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <cilk/cilk.h> #include <cilk/reducer_opadd.h> #define MICRO_IN_SEC 1000000.00 #define sqr(x) ((x)*(x)) #define MAX_ITERATIONS 100 #define DOUBLE_INFINITY (INFINITY) #define CLUSTER_NUM 1000 void kmeans( int dim, // dimension of data double *ipmatrix, // pointer to data int numele, // number of elements int cluster_num, // number of clusters double *centmatrix_final, // output cluster centroids int *cluster_assignment_final // output ); //__declspec(target(mic)) double begin_time, end_time; double begin_time, end_time; //__declspec(target(mic)) double microtime(){ int tv_sec,tv_usec; double time; struct timeval tv; struct timezone tz; gettimeofday(&tv,&tz); return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC; } //calculate the distance between points p1 and p2 //__declspec(target(mic)) inline double calc_distance(int dim, double *p1, double *p2) { double distance_sq_sum = 0; int ii=0; for (ii = 0; ii < dim; ii++) distance_sq_sum += sqr(p1[ii] - p2[ii]); return distance_sq_sum; } inline void fail(char * const str) { printf("%s\n",str); exit(-1); } //print the final result void cluster_diag(int dim, int n, int k, double *X, int *cluster_assignment_index, double *cluster_centroid) { int i; int cluster_member_count[CLUSTER_NUM]; for(i=0; i < k; i++) { cluster_member_count[i] = 0; } for(i=0; i < n; i++) { cluster_member_count[ cluster_assignment_index[i] ]++; } printf(" Final clusters \n"); for ( i = 0; i < k; i++) printf(" cluster %d: members: %8d, centroid (%.1f %.1f) \n", i, cluster_member_count[i], cluster_centroid[i*dim + 0], cluster_centroid[i*dim + 1]); } void copy_result(int in, int * const isrc, int * const itgt, int dn, double * const dsrc, double * const dtgt) { int i; for (i = 0; i < in; i++) itgt[i] = isrc[i]; for(i = 0; i < dn; i++) dtgt[i] = dsrc[i]; } int main(int argc, char** argv) { int numele=0; //number of rows int dim = 0; //number of columns int i,j,k; FILE *ipfile; //input file begin_time = microtime(); if ((ipfile = fopen(argv[1],"r"))==NULL) { printf("Error: can't open input file %s\n",argv[1]); exit(1); } if(fscanf(ipfile,"# num rows=%d num columns=%d",&numele,&dim)!=2) { printf("Format error in first line\n"); exit(1); } else { printf("num rows=%d num columns=%d\n",numele,dim); } double * ipmatrix=(double *)malloc(numele*dim*sizeof(double)); //ipmatrix is the pointer of the array of input matrix if (ipmatrix==NULL) { printf(" malloc failed\n"); exit(1); } for (i=0; i<numele; i++) { for (j=0; j<dim; j++) { fscanf(ipfile,"%lf",&(ipmatrix[i*dim+j])); } } int * cluster_assignment_final = (int *) malloc(sizeof(int)*numele); //the cluster assignment of every row data of ipmatrix double * centmatrix_final = (double *) malloc (CLUSTER_NUM*dim*sizeof(double)); //centmatrix is the pointer of the array of initial centroid kmeans(dim, ipmatrix, numele, CLUSTER_NUM, centmatrix_final, cluster_assignment_final); //call kmeans printf("successful\n"); cluster_diag(dim, numele, CLUSTER_NUM, ipmatrix, cluster_assignment_final, centmatrix_final); free(ipmatrix); free(cluster_assignment_final); free(centmatrix_final); return 0; } int kmeans_itera_operator( int dim, double *ipmatrix, int numele, int cluster_num, double *centmatrix_cur, double *centmatrix_pre, int *cluster_assignment_cur, int *cluster_assignment_pre, double *tot_D ) { int *cluster_count = (int *)malloc(sizeof(int) * cluster_num); // int change_count = 0; int i,j,cluster_assignment; cluster_assignment=0; double min_D,dist; // init // double tot_D_tmp 
= 0;
    for(i=0; i < cluster_num; i++) {
        cluster_count[i] = 0;
        for(j=0; j < dim; j++) {
            centmatrix_cur[i*dim+j] = 0;
        }
    }

    end_time = microtime();
    printf("serial part cost time: %fs\n",end_time-begin_time);
    begin_time = end_time;

    //calculate all distance and choose the cluster, at the same time calculate total distance and new centroids.
    /*
    #pragma offload target(mic) \
    in(ipmatrix:length(numele*dim) alloc_if(1) free_if(1)) \
    in(centmatrix_pre:length(cluster_num*dim) alloc_if(1) free_if(1)) \
    in(cluster_assignment_pre:length(numele) alloc_if(1) free_if(1)) \
    out(cluster_assignment_cur:length(numele) alloc_if(1) free_if(1))
    */
    cilk::reducer_opadd<double> tot_D_tmp(0);
    cilk::reducer_opadd<int> change_count(0);
    //double tot_D_tmp=0;
    //int change_count=0;
    //#pragma omp parallel for private(min_D, cluster_assignment, dist) reduction(+:tot_D_tmp, change_count)
    cilk_for(int i=0; i < numele; i++) {
        // min_D, cluster_assignment and dist must be local to each iteration;
        // reusing the function-scope copies here would be a data race under cilk_for.
        double min_D = DOUBLE_INFINITY;
        int cluster_assignment = -1;
        double dist;
        //calculate the distance and choose the cluster
        for(int j=0; j < cluster_num; j++) {
            dist = calc_distance(dim, ipmatrix+i*dim, centmatrix_pre+j*dim);
            if(dist < min_D) {
                min_D = dist;
                cluster_assignment = j;
            }
        }
        if(cluster_assignment != cluster_assignment_pre[i]) {
            change_count++ ;
        }
        //calculate the total distance and new centroids
        cluster_assignment_cur[i] = cluster_assignment;
        tot_D_tmp += min_D;
    }

    end_time = microtime();
    printf("parallel part cost time: %fs\n",end_time-begin_time);
    begin_time = end_time;

    for(i=0; i< numele; i++) {
        cluster_count[ cluster_assignment_cur[i] ]++;
        for(j=0; j < dim; j++) {
            centmatrix_cur[ cluster_assignment_cur[i] * dim + j] += ipmatrix[i*dim + j];
        }
    }
    // for(i=0;i<20; i++)
    // printf("cluster%d count:%d\n",i,cluster_count[i]);

    //*tot_D = tot_D_tmp;
    *tot_D = tot_D_tmp.get_value();

    for(i=0; i < cluster_num; i++) {
        if( cluster_count[i] < 1 )
            continue;
        for(j=0; j < dim; j++) {
            centmatrix_cur[i*dim + j] = centmatrix_cur[i*dim + j] / cluster_count[i];
        }
    }
    end_time = microtime();
    printf("serial part cost time: %fs\n",end_time-begin_time);
    // if (cluster_count !=NULL)
    // free(cluster_count);

    //return change_count;
    return change_count.get_value();
}

void kmeans(
    int dim,                       // dimension of data
    double *ipmatrix,              // pointer to data
    int numele,                    // number of elements
    int cluster_num,               // number of clusters
    double *centmatrix_final,      // output cluster centroids
    int *cluster_assignment_final  // output
)
{
    int *cluster_assignment_cur = (int *)malloc(sizeof(int) * numele);
    int *cluster_assignment_pre = (int *)malloc(sizeof(int) * numele);
    double *centmatrix_cur = (double *)malloc(sizeof(double) * cluster_num * dim);
    double *centmatrix_pre = (double *)malloc(sizeof(double) * cluster_num * dim);
    int i, j, *cluster_assignment_tmp;
    double totD, prev_totD, *centmatrix_tmp;

    if (!cluster_assignment_cur || !cluster_assignment_pre || !centmatrix_pre || !centmatrix_cur)
        fail("Error allocating memory!");

    // initial setup
    //choose the first CLUSTER_NUM of ipmatrix as the centmatrix
    for( i=0; i< cluster_num * dim ; i++) {
        centmatrix_pre[i] = ipmatrix[i];
    }

    kmeans_itera_operator(dim, ipmatrix, numele, cluster_num,
                          centmatrix_cur, centmatrix_pre,
                          cluster_assignment_cur, cluster_assignment_pre, &totD);
    prev_totD = DOUBLE_INFINITY;

    // Iteration calculate
    int batch_iteration = MAX_ITERATIONS;
    while ( batch_iteration-- ) {
        printf(" batch_iteration = %d\n", batch_iteration);
        // cluster_diag(dim, numele, CLUSTER_NUM, ipmatrix, cluster_assignment_cur, centmatrix_cur);
        prev_totD = totD;
        cluster_assignment_tmp = cluster_assignment_cur;
        cluster_assignment_cur =
cluster_assignment_pre; cluster_assignment_pre = cluster_assignment_tmp; centmatrix_tmp = centmatrix_cur; centmatrix_cur = centmatrix_pre; centmatrix_pre = centmatrix_tmp; if(kmeans_itera_operator(dim, ipmatrix, numele, cluster_num, centmatrix_cur, centmatrix_pre, cluster_assignment_cur, cluster_assignment_pre, &totD) == 0) { break; } if( prev_totD < totD ) { printf("%lf %lf\n",prev_totD,totD); cluster_assignment_tmp = cluster_assignment_cur; cluster_assignment_cur = cluster_assignment_pre; cluster_assignment_pre = cluster_assignment_tmp; centmatrix_tmp = centmatrix_cur; centmatrix_cur = centmatrix_pre; centmatrix_pre = centmatrix_tmp; break; } } // cluster_diag(dim, numele, CLUSTER_NUM, ipmatrix, cluster_assignment_cur, centmatrix_cur); copy_result(numele, cluster_assignment_cur, cluster_assignment_final, cluster_num * dim, centmatrix_cur, centmatrix_final); free(cluster_assignment_cur); free(cluster_assignment_pre); free(centmatrix_cur); free(centmatrix_pre); }
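The commented-out "#pragma omp parallel for private(...) reduction(+:tot_D_tmp, change_count)" in kmeans_itera_operator hints at the OpenMP twin of the Cilk reducers used above. A minimal sketch of the same race-free assignment step in that style; assign_step_omp is a hypothetical helper reusing this file's calc_distance and DOUBLE_INFINITY:

static int assign_step_omp(int dim, const double *ipmatrix, int numele,
                           int cluster_num, const double *centers,
                           const int *assign_pre, int *assign_cur,
                           double *tot_D)
{
    double tot = 0.0;
    int moved = 0;
    #pragma omp parallel for reduction(+:tot, moved)
    for (int i = 0; i < numele; i++) {
        /* per-iteration locals: no shared scratch variables, hence no race */
        double min_D = DOUBLE_INFINITY;
        int best = -1;
        for (int j = 0; j < cluster_num; j++) {
            double d = calc_distance(dim, (double *)ipmatrix + i*dim,
                                     (double *)centers + j*dim);
            if (d < min_D) { min_D = d; best = j; }
        }
        if (best != assign_pre[i]) moved++;
        assign_cur[i] = best;
        tot += min_D;
    }
    *tot_D = tot;
    return moved;  /* 0 means the clustering has converged */
}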
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fp32) // A*D function (colscale): GB (_AxD__eq_fp32) // D*A function (rowscale): GB (_DxB__eq_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fp32) // C=scalar+B GB (_bind1st__eq_fp32) // C=scalar+B' GB (_bind1st_tran__eq_fp32) // C=A+scalar GB (_bind2nd__eq_fp32) // C=A'+scalar GB (_bind2nd_tran__eq_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but 
A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
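Every kernel in the file above is guarded by "#if GB_DISABLE return (GrB_NO_VALUE) ;", so defining GxB_NO_EQ, GxB_NO_FP32 or GxB_NO_EQ_FP32 in GB_control.h compiles the specialized function down to a stub and the caller falls back to a generic path. A minimal sketch of that compile-time opt-out pattern, with illustrative names (MYLIB_NO_ADD, MY_DISABLE, my_add_kernel) rather than the GraphBLAS ones:

#define MYLIB_NO_ADD 0          /* flip to 1 to compile this kernel out */

#define MY_DISABLE (MYLIB_NO_ADD)

/* returns 0 on success, -1 when the specialized kernel is compiled out */
static int my_add_kernel(int n, const float *a, const float *b, float *c)
{
#if MY_DISABLE
    (void)n; (void)a; (void)b; (void)c;
    return -1;                  /* caller must take the generic fallback */
#else
    for (int i = 0; i < n; i++)
        c[i] = a[i] + b[i];     /* hard-coded, vectorizable inner loop */
    return 0;
#endif
}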
GB_binop__bclr_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__bclr_int16
// A.*B function (eWiseMult): GB_AemultB__bclr_int16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_int16
// C+=b function (dense accum): GB_Cdense_accumb__bclr_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_int16
// C=scalar+B GB_bind1st__bclr_int16
// C=scalar+B' GB_bind1st_tran__bclr_int16
// C=A+scalar GB_bind2nd__bclr_int16
// C=A'+scalar GB_bind2nd_tran__bclr_int16

// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, int16_t, 16)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_BITCLR (x, y, int16_t, 16) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_INT16 || GxB_NO_BCLR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bclr_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bclr_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bclr_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bclr_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bclr_int16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bclr_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = Bx [p] ; Cx [p] = GB_BITCLR (x, bij, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bclr_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = Ax [p] ; Cx [p] = GB_BITCLR (aij, y, int16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (x, aij, int16_t, 16) ; \ } GrB_Info 
GB_bind1st_tran__bclr_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITCLR (aij, y, int16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bclr_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
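Every kernel in GB_binop__bclr_int16.c reduces to the GB_BITCLR macro named in the header comment (cij = GB_BITCLR (aij, bij, int16_t, 16)). The sketch below re-implements the same bit-clear operation locally to show its semantics; it assumes the MATLAB-style 1-based bit numbering that SuiteSparse documents for its GxB bitwise operators, and DEMO_BITCLR is an illustrative stand-in, not the library's macro.

/* Demonstration only: clear bit y of x, 1-based; an out-of-range y
   leaves x unchanged. The real macro lives inside SuiteSparse. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BITCLR(x,y,type,nbits) \
    (((y) < 1 || (y) > (nbits)) ? (x) : ((x) & ~(((type) 1) << ((y)-1))))

int main (void)
{
    int16_t a = 0x00FF ;
    /* clearing bit 3 removes the 0x0004 contribution: 0x00ff -> 0x00fb */
    printf ("%04x\n", (unsigned) (uint16_t) DEMO_BITCLR (a, 3, int16_t, 16)) ;
    return 0 ;
}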
meta.c
int main()
{
  #pragma omp metadirective
  for(int i=0; i<10; i++)
    ;
  return 0;
}
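meta.c only checks that a bare metadirective is accepted; with no when or default clause it resolves to no directive at all. For context, a minimal sketch of the directive used with clauses, assuming an OpenMP 5.0 compiler; the device selector below is the textbook offloading example, not something this test exercises.

#include <stdio.h>

int main(void)
{
    int a[10] = {0};
    /* The metadirective resolves at compile time: an offloaded variant
       when targeting NVIDIA PTX devices, a host parallel-for otherwise. */
    #pragma omp metadirective \
        when(device={arch("nvptx")}: target teams distribute parallel for) \
        default(parallel for)
    for (int i = 0; i < 10; i++)
        a[i] = i;
    printf("%d\n", a[9]);
    return 0;
}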
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: template<typename OP, int Req> struct BackwardUseNoneOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *igrad, const DType *ograd) { KERNEL_ASSIGN(igrad[i], Req, OP::Map(ograd[i])); } }; template<typename OP, int Req> struct BackwardUseInOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *igrad, const DType *ograd, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(igrad[i], Req, ograd[i] * OP::Map(lhs[i], rhs[i])); } }; /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! \brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP> static inline size_t FillDense(mshadow::Stream<cpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<cpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = std::min(idx_l, idx_r); if (static_cast<size_t>(index_out_min) > iter_out) { const size_t size = (*out)[iter_out].shape_.Size(); const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for for (int i = iter_out; i < index_out_min; ++i) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { SerialLaunchCPU<OpBase::set_to_scalar<Req>>(s, size, (*out)[i].dptr_, zero_input_val); }); } } return index_out_min; } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? 
a : c) : (b < c ? b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<BackwardUseNoneOp<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<BackwardUseNoneOp<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<BackwardUseInOp<LOP, Req>, xpu>::Launch( s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<BackwardUseInOp<ROP, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false); }); // 
rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, mshadow::op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ?
DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } if (*dispatch_mode == DispatchMode::kFComputeFallback) { LogStorageFallback(attrs, dev_mask, in_attrs, out_attrs); } return true; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if 
(req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } /*! \brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { 
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { UnaryOp::KernelComputeEx<xpu, BackwardUseNoneOp<LOP, Req>>(attrs, ctx, inputs, req, {outputs[0]}); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { UnaryOp::KernelComputeEx<xpu, BackwardUseNoneOp<ROP, Req>>(attrs, ctx, inputs, req, {outputs[1]}); }); } else { LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); }); } } }; // class ElemwiseBinaryOp /*! 
\brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
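The macros at the end of elemwise_binary_op.h are what operator registration files invoke; each expands to an NNVM_REGISTER_OP block that fixes arity (2 in, 1 out), shape/type inference, in-place options, and wires Compute/ComputeEx to the templated kernels above. A hypothetical usage sketch: the operator name _demo_elemwise_add and the mshadow_op::plus kernel are illustrative assumptions, and the fragment belongs inside namespace mxnet::op in an operator .cc file, not in this header.

// Hypothetical registration (demonstration only), placed in an
// elemwise_binary_op_basic.cc-style file within namespace mxnet::op:
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(_demo_elemwise_add, mshadow_op::plus)
.describe("Demonstration only: element-wise sum of two arrays.");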
GB_binop__lor_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lor_uint8 // A.*B function (eWiseMult): GB_AemultB__lor_uint8 // A*D function (colscale): GB_AxD__lor_uint8 // D*A function (rowscale): GB_DxB__lor_uint8 // C+=B function (dense accum): GB_Cdense_accumB__lor_uint8 // C+=b function (dense accum): GB_Cdense_accumb__lor_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_uint8 // C=scalar+B GB_bind1st__lor_uint8 // C=scalar+B' GB_bind1st_tran__lor_uint8 // C=A+scalar GB_bind2nd__lor_uint8 // C=A'+scalar GB_bind2nd_tran__lor_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = ((x != 0) || (y != 0)) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT8 || GxB_NO_LOR_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lor_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lor_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lor_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lor_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lor_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__lor_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lor_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lor_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = ((x != 0) || (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lor_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = ((aij != 0) || (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) || (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__lor_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) || (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__lor_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
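Both GB_binop files above expose bind1st and bind2nd kernels, which fix one operand of z = f(x,y) to a scalar before mapping f across the array. A plain-C sketch of that idea, specialized to this file's LOR operator; the demo_ function names are illustrative, not the GraphBLAS API.

#include <stdint.h>
#include <stdio.h>

/* bind1st: Cx [p] = f (x, Bx [p]), first operand fixed to the scalar x */
static void demo_bind1st_lor (uint8_t *Cx, uint8_t x, const uint8_t *Bx, int n)
{
    for (int p = 0 ; p < n ; p++) Cx [p] = ((x != 0) || (Bx [p] != 0)) ;
}

/* bind2nd: Cx [p] = f (Ax [p], y), second operand fixed to the scalar y */
static void demo_bind2nd_lor (uint8_t *Cx, const uint8_t *Ax, uint8_t y, int n)
{
    for (int p = 0 ; p < n ; p++) Cx [p] = ((Ax [p] != 0) || (y != 0)) ;
}

int main (void)
{
    uint8_t B [4] = {0, 1, 0, 2}, C1 [4], C2 [4] ;
    demo_bind1st_lor (C1, 0, B, 4) ;    /* {0, 1, 0, 1} */
    demo_bind2nd_lor (C2, B, 1, 4) ;    /* {1, 1, 1, 1} */
    for (int p = 0 ; p < 4 ; p++) printf ("%d %d\n", C1 [p], C2 [p]) ;
    return 0 ;
}

For a commutative operator such as LOR the two variants agree up to argument order; they differ materially for non-commutative operators such as MINUS or DIV.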
laplace2d.c
#include <math.h>
#include <stdio.h>
#include <string.h>
#include "timer.h"

#define NN 4096
#define NM 4096

double A[NN][NM];
double Anew[NN][NM];

int main(int argc, char** argv)
{
    const int n = NN;
    const int m = NM;
    const int iter_max = 1000;
    const double tol = 1.0e-6;
    double error = 1.0;

    memset(A, 0, n * m * sizeof(double));
    memset(Anew, 0, n * m * sizeof(double));

    for (int j = 0; j < n; j++)
    {
        A[j][0] = 1.0;
        Anew[j][0] = 1.0;
    }

    printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);

    StartTimer();
    int iter = 0;

#pragma acc data copy(A), create(Anew)
    while ( error > tol && iter < iter_max )
    {
        error = 0.0;

//#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
                                    + A[j-1][i] + A[j+1][i]);
                error = fmax( error, fabs(Anew[j][i] - A[j][i]));
            }
        }

//#pragma omp parallel for shared(m, n, Anew, A)
#pragma acc kernels
        for( int j = 1; j < n-1; j++)
        {
            for( int i = 1; i < m-1; i++ )
            {
                A[j][i] = Anew[j][i];
            }
        }

        if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);

        iter++;
    }

    double runtime = GetTimer();

    printf(" total: %f s\n", runtime / 1000);
}
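The commented-out OpenMP pragmas in laplace2d.c hint at a shared-memory variant, but enabled as written they would race on error, which every thread updates. Below is a self-contained sketch of a race-free OpenMP version of the same Jacobi iteration; it assumes OpenMP 3.1+ for the max reduction, and the 512-point mesh and iteration cap are arbitrary choices for the demo.

#include <math.h>
#include <stdio.h>

#define N 512
static double A[N][N], Anew[N][N];   /* zero-initialized at file scope */

int main(void)
{
    const double tol = 1.0e-6;
    double error = 1.0;
    for (int j = 0; j < N; j++) { A[j][0] = 1.0; Anew[j][0] = 1.0; }

    int iter = 0;
    while (error > tol && iter < 1000) {
        error = 0.0;
        /* reduction(max:error) is the piece the commented pragmas lacked */
        #pragma omp parallel for reduction(max:error)
        for (int j = 1; j < N-1; j++)
            for (int i = 1; i < N-1; i++) {
                Anew[j][i] = 0.25 * (A[j][i+1] + A[j][i-1]
                                   + A[j-1][i] + A[j+1][i]);
                error = fmax(error, fabs(Anew[j][i] - A[j][i]));
            }
        #pragma omp parallel for
        for (int j = 1; j < N-1; j++)
            for (int i = 1; i < N-1; i++)
                A[j][i] = Anew[j][i];
        iter++;
    }
    printf("iters=%d error=%g\n", iter, error);
    return 0;
}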
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; /* all four problem sizes are required on the command line */ if (argc < 5) { printf("Usage: %s Nx Ny Nz Nt\n", argv[0]); return 1; } Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2; /* allocated below, once Nz is known */ A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(8*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(8*t3+Nx-5,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),512*t4+510);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
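The timeval_subtract helper at the top of 3d25pt.lbpar.c is reusable on its own. A minimal usage sketch, to be linked against the definition above; the ~100 ms nanosleep merely stands in for a measured kernel.

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* defined in 3d25pt.lbpar.c above */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y);

int main(void)
{
    struct timeval start, end, diff;
    struct timespec ts = {0, 100000000L};   /* ~100 ms of "work" */
    gettimeofday(&start, 0);
    nanosleep(&ts, NULL);
    gettimeofday(&end, 0);
    timeval_subtract(&diff, &end, &start);  /* returns 1 if end < start */
    printf("elapsed: %f s\n", diff.tv_sec + diff.tv_usec * 1.0e-6);
    return 0;
}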